Nov 28 16:10:45 crc systemd[1]: Starting Kubernetes Kubelet... Nov 28 16:10:45 crc restorecon[4747]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c24 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c138,c778 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 28 16:10:45 crc restorecon[4747]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: 
/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c108,c511 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: 
/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 28 16:10:45 crc restorecon[4747]: 
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc 
restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc 
restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 16:10:45 
crc restorecon[4747]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 
16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 16:10:45 crc restorecon[4747]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 16:10:45 crc restorecon[4747]: 
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 16:10:45 crc restorecon[4747]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 
16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc 
restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 16:10:46 crc restorecon[4747]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 16:10:46 crc restorecon[4747]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 28 16:10:47 crc kubenswrapper[4954]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 28 16:10:47 crc kubenswrapper[4954]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 28 16:10:47 crc kubenswrapper[4954]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 28 16:10:47 crc kubenswrapper[4954]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
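The long run of restorecon records above all report paths "not reset as customized by admin": container_file_t is normally a customizable SELinux type, so a plain relabel leaves admin-set contexts in place rather than forcing them back to the policy default. A minimal sketch of how those skipped labels could be inspected, and force-reset if ever needed (illustrative paths, not commands taken from this job):

# Compare the policy default against the on-disk label for one of the
# paths named above (run as root on the node; path is illustrative).
matchpathcon /var/lib/kubelet/device-plugins
ls -Zd /var/lib/kubelet/device-plugins
# container_file_t is normally listed among the customizable types that
# plain restorecon skips, which is why these entries are informational:
grep container_file_t /etc/selinux/targeted/contexts/customizable_types
# -F forces customizable types back to the policy default as well:
restorecon -RFv /var/lib/kubelet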
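The kubelet deprecation warnings above (and continuing below) say these flags should instead be set in the file passed via --config. A minimal sketch of that migration, assuming a CRI-O runtime socket and purely illustrative values, none of which are read from this node:

# Hypothetical KubeletConfiguration covering the deprecated flags noted in
# the log: --container-runtime-endpoint, --volume-plugin-dir,
# --register-with-taints and --system-reserved (all values illustrative).
cat >/etc/kubernetes/kubelet-config.yaml <<'EOF'
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
containerRuntimeEndpoint: unix:///var/run/crio/crio.sock
volumePluginDir: /etc/kubernetes/kubelet-plugins/volume/exec
registerWithTaints:
- key: node-role.kubernetes.io/master
  effect: NoSchedule
systemReserved:
  cpu: 500m
  memory: 1Gi
featureGates:
  KMSv1: true   # mirrors the "Setting deprecated feature gate KMSv1=true" warning
EOF
# The kubelet would then be started with --config pointing at this file:
kubelet --config=/etc/kubernetes/kubelet-config.yaml

The "unrecognized feature gate" warnings that follow appear to be OpenShift-specific gates handed to a kubelet whose gate registry does not define them; as the W-level lines and the continued startup show, they are logged and otherwise ignored rather than treated as fatal.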
Nov 28 16:10:47 crc kubenswrapper[4954]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 28 16:10:47 crc kubenswrapper[4954]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.204913 4954 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.209895 4954 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.209927 4954 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.209939 4954 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.209949 4954 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.209959 4954 feature_gate.go:330] unrecognized feature gate: Example Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.209967 4954 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.209975 4954 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.209985 4954 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.209994 4954 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210002 4954 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210010 4954 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210017 4954 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210025 4954 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210033 4954 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210040 4954 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210048 4954 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210056 4954 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210063 4954 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210080 4954 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210090 4954 feature_gate.go:330] unrecognized 
feature gate: ChunkSizeMiB Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210097 4954 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210105 4954 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210113 4954 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210120 4954 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210128 4954 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210136 4954 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210143 4954 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210153 4954 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210165 4954 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210176 4954 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210185 4954 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210194 4954 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210202 4954 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210212 4954 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210220 4954 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210228 4954 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210237 4954 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210244 4954 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210252 4954 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210259 4954 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210267 4954 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210275 4954 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210283 4954 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210290 4954 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210300 4954 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. 
It will be removed in a future release. Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210310 4954 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210319 4954 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210328 4954 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210336 4954 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210344 4954 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210353 4954 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210360 4954 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210368 4954 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210377 4954 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210384 4954 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210392 4954 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210400 4954 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210409 4954 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210417 4954 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210424 4954 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210433 4954 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210442 4954 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210449 4954 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210457 4954 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210469 4954 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210476 4954 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210484 4954 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210492 4954 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210499 4954 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210507 4954 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.210515 4954 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 
16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210714 4954 flags.go:64] FLAG: --address="0.0.0.0" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210731 4954 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210744 4954 flags.go:64] FLAG: --anonymous-auth="true" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210755 4954 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210766 4954 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210775 4954 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210787 4954 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210798 4954 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210807 4954 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210816 4954 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210826 4954 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210836 4954 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210845 4954 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210854 4954 flags.go:64] FLAG: --cgroup-root="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210863 4954 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210872 4954 flags.go:64] FLAG: --client-ca-file="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210881 4954 flags.go:64] FLAG: --cloud-config="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210890 4954 flags.go:64] FLAG: --cloud-provider="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210900 4954 flags.go:64] FLAG: --cluster-dns="[]" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210955 4954 flags.go:64] FLAG: --cluster-domain="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210965 4954 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210975 4954 flags.go:64] FLAG: --config-dir="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210984 4954 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.210993 4954 flags.go:64] FLAG: --container-log-max-files="5" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211005 4954 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211013 4954 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211023 4954 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211032 4954 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211041 4954 flags.go:64] FLAG: --contention-profiling="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 
16:10:47.211050 4954 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211059 4954 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211069 4954 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211078 4954 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211088 4954 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211098 4954 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211107 4954 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211116 4954 flags.go:64] FLAG: --enable-load-reader="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211126 4954 flags.go:64] FLAG: --enable-server="true" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211134 4954 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211145 4954 flags.go:64] FLAG: --event-burst="100" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211154 4954 flags.go:64] FLAG: --event-qps="50" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211164 4954 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211173 4954 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211182 4954 flags.go:64] FLAG: --eviction-hard="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211193 4954 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211203 4954 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211212 4954 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211222 4954 flags.go:64] FLAG: --eviction-soft="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211231 4954 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211239 4954 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211249 4954 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211259 4954 flags.go:64] FLAG: --experimental-mounter-path="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211268 4954 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211277 4954 flags.go:64] FLAG: --fail-swap-on="true" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211285 4954 flags.go:64] FLAG: --feature-gates="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211296 4954 flags.go:64] FLAG: --file-check-frequency="20s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211305 4954 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211314 4954 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211324 4954 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 
16:10:47.211333 4954 flags.go:64] FLAG: --healthz-port="10248" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211342 4954 flags.go:64] FLAG: --help="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211351 4954 flags.go:64] FLAG: --hostname-override="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211359 4954 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211368 4954 flags.go:64] FLAG: --http-check-frequency="20s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211377 4954 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211387 4954 flags.go:64] FLAG: --image-credential-provider-config="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211395 4954 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211404 4954 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211413 4954 flags.go:64] FLAG: --image-service-endpoint="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211422 4954 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211432 4954 flags.go:64] FLAG: --kube-api-burst="100" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211441 4954 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211450 4954 flags.go:64] FLAG: --kube-api-qps="50" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211459 4954 flags.go:64] FLAG: --kube-reserved="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211468 4954 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211477 4954 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211486 4954 flags.go:64] FLAG: --kubelet-cgroups="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211494 4954 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211503 4954 flags.go:64] FLAG: --lock-file="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211512 4954 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211521 4954 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211567 4954 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211581 4954 flags.go:64] FLAG: --log-json-split-stream="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211590 4954 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211599 4954 flags.go:64] FLAG: --log-text-split-stream="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211608 4954 flags.go:64] FLAG: --logging-format="text" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211616 4954 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211626 4954 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211635 4954 flags.go:64] FLAG: --manifest-url="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211644 4954 
flags.go:64] FLAG: --manifest-url-header="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211655 4954 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211664 4954 flags.go:64] FLAG: --max-open-files="1000000" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211674 4954 flags.go:64] FLAG: --max-pods="110" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211683 4954 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211694 4954 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211703 4954 flags.go:64] FLAG: --memory-manager-policy="None" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211712 4954 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211721 4954 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211730 4954 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211739 4954 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211757 4954 flags.go:64] FLAG: --node-status-max-images="50" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211766 4954 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211775 4954 flags.go:64] FLAG: --oom-score-adj="-999" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211784 4954 flags.go:64] FLAG: --pod-cidr="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211793 4954 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211806 4954 flags.go:64] FLAG: --pod-manifest-path="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211817 4954 flags.go:64] FLAG: --pod-max-pids="-1" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211827 4954 flags.go:64] FLAG: --pods-per-core="0" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211835 4954 flags.go:64] FLAG: --port="10250" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211845 4954 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211853 4954 flags.go:64] FLAG: --provider-id="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211862 4954 flags.go:64] FLAG: --qos-reserved="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211871 4954 flags.go:64] FLAG: --read-only-port="10255" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211880 4954 flags.go:64] FLAG: --register-node="true" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211889 4954 flags.go:64] FLAG: --register-schedulable="true" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211898 4954 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211913 4954 flags.go:64] FLAG: --registry-burst="10" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211922 4954 flags.go:64] FLAG: --registry-qps="5" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211930 4954 flags.go:64] 
FLAG: --reserved-cpus="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211939 4954 flags.go:64] FLAG: --reserved-memory="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211957 4954 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211966 4954 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211975 4954 flags.go:64] FLAG: --rotate-certificates="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211984 4954 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.211993 4954 flags.go:64] FLAG: --runonce="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212002 4954 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212011 4954 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212020 4954 flags.go:64] FLAG: --seccomp-default="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212029 4954 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212038 4954 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212047 4954 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212056 4954 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212065 4954 flags.go:64] FLAG: --storage-driver-password="root" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212074 4954 flags.go:64] FLAG: --storage-driver-secure="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212083 4954 flags.go:64] FLAG: --storage-driver-table="stats" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212092 4954 flags.go:64] FLAG: --storage-driver-user="root" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212101 4954 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212110 4954 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212119 4954 flags.go:64] FLAG: --system-cgroups="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212128 4954 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212143 4954 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212152 4954 flags.go:64] FLAG: --tls-cert-file="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212162 4954 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212173 4954 flags.go:64] FLAG: --tls-min-version="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212181 4954 flags.go:64] FLAG: --tls-private-key-file="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212190 4954 flags.go:64] FLAG: --topology-manager-policy="none" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212199 4954 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212216 4954 flags.go:64] FLAG: --topology-manager-scope="container" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212225 4954 flags.go:64] 
FLAG: --v="2" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212236 4954 flags.go:64] FLAG: --version="false" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212248 4954 flags.go:64] FLAG: --vmodule="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212258 4954 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.212268 4954 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212459 4954 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212469 4954 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212478 4954 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212488 4954 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212499 4954 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212509 4954 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212518 4954 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212552 4954 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212562 4954 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212572 4954 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212581 4954 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212589 4954 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212597 4954 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212605 4954 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212613 4954 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212621 4954 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212628 4954 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212636 4954 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212643 4954 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212651 4954 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212659 4954 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212666 4954 feature_gate.go:330] 
unrecognized feature gate: MachineAPIProviderOpenStack Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212674 4954 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212681 4954 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212689 4954 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212698 4954 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212709 4954 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212716 4954 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212725 4954 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212733 4954 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212741 4954 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212748 4954 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212755 4954 feature_gate.go:330] unrecognized feature gate: Example Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212763 4954 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212770 4954 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212779 4954 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212786 4954 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212794 4954 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212801 4954 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212809 4954 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212817 4954 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212825 4954 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212833 4954 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212840 4954 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212848 4954 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212857 4954 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212865 4954 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 
16:10:47.212872 4954 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212882 4954 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212892 4954 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212901 4954 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212909 4954 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212918 4954 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212927 4954 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212935 4954 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212944 4954 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212952 4954 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212959 4954 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212972 4954 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212981 4954 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212989 4954 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.212998 4954 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.213006 4954 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.213014 4954 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.213022 4954 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.213029 4954 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.213037 4954 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.213044 4954 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.213052 4954 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.213062 4954 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
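The flags.go:64 dump a few entries back records every command-line value the kubelet resolved at startup (--node-ip, --max-pods, and so on). Since each entry follows the same FLAG: --name="value" shape, a small parser can rebuild the effective flag set; a sketch under that assumption (helper name is mine):

```python
import re

# Each resolved value is logged as:  flags.go:64] FLAG: --max-pods="110"
FLAG_ENTRY = re.compile(r'FLAG: (--[\w-]+)="([^"]*)"')

def parse_flag_dump(log_text: str) -> dict[str, str]:
    """Map each --flag in the startup dump to its logged (string) value."""
    return dict(FLAG_ENTRY.findall(log_text))

# Usage against this capture, e.g.:
#   parse_flag_dump(text)["--node-ip"]              -> "192.168.126.11"
#   parse_flag_dump(text)["--register-with-taints"] -> "node-role.kubernetes.io/master=:NoSchedule"
```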
Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.213073 4954 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.213087 4954 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.221217 4954 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.221233 4954 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221293 4954 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221299 4954 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221303 4954 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221307 4954 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221311 4954 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221315 4954 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221318 4954 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221322 4954 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221325 4954 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221329 4954 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221332 4954 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221336 4954 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221339 4954 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221342 4954 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221346 4954 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221349 4954 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221353 4954 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221356 4954 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221361 4954 feature_gate.go:353] Setting GA feature gate 
DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221367 4954 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221372 4954 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221376 4954 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221380 4954 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221384 4954 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221388 4954 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221392 4954 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221396 4954 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221399 4954 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221403 4954 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221407 4954 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221410 4954 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221414 4954 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221418 4954 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221422 4954 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221425 4954 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221428 4954 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221432 4954 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221435 4954 feature_gate.go:330] unrecognized feature gate: Example Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221439 4954 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221442 4954 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221446 4954 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221449 4954 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221453 4954 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221456 4954 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 
16:10:47.221460 4954 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221463 4954 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221466 4954 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221470 4954 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221473 4954 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221477 4954 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221480 4954 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221484 4954 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221487 4954 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221490 4954 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221494 4954 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221498 4954 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221501 4954 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221505 4954 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221509 4954 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221514 4954 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221518 4954 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221535 4954 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221539 4954 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221544 4954 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221548 4954 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221551 4954 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221556 4954 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
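Each time the kubelet re-parses its gate list it re-emits one warning per unknown name, which is why the same names (GatewayAPI, ClusterAPIInstall, InsightsConfig, and so on) recur block after block; these look like cluster-level OpenShift gates that the kubelet's own feature table simply does not know. A sketch that deduplicates and counts them, assuming only the "unrecognized feature gate: <Name>" phrasing above:

```python
import re
from collections import Counter

UNRECOGNIZED = re.compile(r"unrecognized feature gate: (\w+)")

def unrecognized_gates(log_text: str) -> Counter:
    """Count how often each unknown gate name is warned about."""
    return Counter(UNRECOGNIZED.findall(log_text))

# most_common() on this log shows each name repeated once per parse pass,
# i.e. high counts mean repetition, not additional distinct gates.
```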
Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221560 4954 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221563 4954 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221567 4954 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221571 4954 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.221576 4954 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221674 4954 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221680 4954 feature_gate.go:330] unrecognized feature gate: Example Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221684 4954 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221689 4954 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221693 4954 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221697 4954 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221701 4954 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221704 4954 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221708 4954 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221712 4954 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221716 4954 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221719 4954 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221723 4954 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221727 4954 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221730 4954 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221735 4954 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
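The feature_gate.go:386 entry just above prints the gates the kubelet actually applied as a Go map literal ({map[Name:true ...]}). A sketch converting that literal into a Python dict of booleans; the parser name is mine, and it assumes values are only true/false, as in this log:

```python
import re

def parse_feature_gates(entry: str) -> dict[str, bool]:
    """Parse a Go-style 'feature gates: {map[Name:true ...]}' log entry."""
    body = re.search(r"feature gates: \{map\[(.*?)\]\}", entry)
    if not body:
        return {}
    pairs = (item.split(":") for item in body.group(1).split())
    return {name: value == "true" for name, value in pairs}

entry = "feature gates: {map[CloudDualStackNodeIPs:true KMSv1:true NodeSwap:false]}"
assert parse_feature_gates(entry) == {
    "CloudDualStackNodeIPs": True, "KMSv1": True, "NodeSwap": False,
}
```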
Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221740 4954 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221744 4954 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221748 4954 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221753 4954 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221757 4954 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221761 4954 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221764 4954 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221768 4954 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221772 4954 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221776 4954 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221781 4954 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221786 4954 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221790 4954 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221794 4954 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221799 4954 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221802 4954 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221806 4954 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221809 4954 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221813 4954 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221817 4954 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221820 4954 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221825 4954 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221829 4954 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221834 4954 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221838 4954 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221842 4954 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221846 4954 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221851 4954 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221854 4954 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221858 4954 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221861 4954 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221865 4954 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221868 4954 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221872 4954 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221876 4954 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221879 4954 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221883 4954 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221886 4954 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221889 4954 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221893 4954 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221897 4954 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221900 4954 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221904 4954 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221907 4954 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221911 4954 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221915 4954 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221918 4954 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 
16:10:47.221921 4954 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221925 4954 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221929 4954 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221932 4954 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221936 4954 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221939 4954 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221943 4954 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.221948 4954 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.221954 4954 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.222063 4954 server.go:940] "Client rotation is on, will bootstrap in background" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.229163 4954 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.229275 4954 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
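The entries above show the client-rotation path: the existing kubeconfig is still valid, so no bootstrap runs and the kubelet just loads /var/lib/kubelet/pki/kubelet-client-current.pem. To check that certificate's expiry independently of the log, something like the following works, assuming the third-party cryptography package and that the .pem concatenates certificate and key (my reading of the kubelet's cert store layout):

```python
import re
from cryptography import x509  # third-party: pip install cryptography

PEM_PATH = "/var/lib/kubelet/pki/kubelet-client-current.pem"  # path from the log

def client_cert_not_after(path: str = PEM_PATH):
    """Return the notAfter timestamp of the kubelet client certificate."""
    pem = open(path, "rb").read()
    # The file appears to hold cert and key back to back, so isolate the
    # first CERTIFICATE block before handing it to the parser (assumption).
    block = re.search(
        rb"-----BEGIN CERTIFICATE-----.*?-----END CERTIFICATE-----", pem, re.S
    )
    return x509.load_pem_x509_certificate(block.group(0)).not_valid_after

# The rotation entries below expect this to print 2026-02-24 05:52:08.
```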
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.230001 4954 server.go:997] "Starting client certificate rotation"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.230020 4954 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.230953 4954 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-13 13:00:42.046240229 +0000 UTC
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.231053 4954 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 356h49m54.815190671s for next certificate rotation
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.257913 4954 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.261243 4954 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.277157 4954 log.go:25] "Validated CRI v1 runtime API"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.366095 4954 log.go:25] "Validated CRI v1 image API"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.385954 4954 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.390045 4954 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-28-16-05-37-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.390104 4954 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.418570 4954 manager.go:217] Machine: {Timestamp:2025-11-28 16:10:47.41432707 +0000 UTC m=+0.805995681 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:0cbba33b-5b41-4595-ae12-f91c5d706ba3 BootID:f44fb9d0-93ac-4b47-a932-40af0d9339d2 Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:56:5a:27 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:56:5a:27 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:d6:15:96 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:97:e0:bd Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:7e:d6:00 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:43:4e:58 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:ce:97:71 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:ae:aa:6a:ae:11:31 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:c6:e9:44:36:de:87 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
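The two certificate_manager.go:356 entries above are internally consistent: the logged wait is simply the rotation deadline minus the entry's own timestamp (upstream kubelet picks that deadline at a jittered point of the certificate's lifetime, roughly 70-90%, though the jitter itself is not visible here). A quick check in Python with the values from the log, nanoseconds truncated to microseconds:

    from datetime import datetime, timezone

    logged_at = datetime(2025, 11, 28, 16, 10, 47, 231053, tzinfo=timezone.utc)  # entry timestamp
    deadline  = datetime(2025, 12, 13, 13, 0, 42, 46240, tzinfo=timezone.utc)    # rotation deadline
    print(deadline - logged_at)  # 14 days, 20:49:54.815187 == 356h49m54.8s, matching the log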
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.419714 4954 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.420091 4954 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.421794 4954 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.422262 4954 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.422332 4954 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.422831 4954 topology_manager.go:138] "Creating topology manager with none policy"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.422861 4954 container_manager_linux.go:303] "Creating device plugin manager"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.423225 4954 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.423309 4954 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.423703 4954 state_mem.go:36] "Initialized new in-memory state store"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.424338 4954 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.426639 4954 kubelet.go:418] "Attempting to sync node with API server"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.426689 4954 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.426759 4954 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.426787 4954 kubelet.go:324] "Adding apiserver pod source"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.426812 4954 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.430589 4954 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.431627 4954 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.433317 4954 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused
Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.433436 4954 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.173:6443: connect: connection refused" logger="UnhandledError"
Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.433389 4954 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused
Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.433518 4954 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.173:6443: connect: connection refused" logger="UnhandledError"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.434673 4954 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.435692 4954 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.435734 4954 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.435748 4954 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.435763 4954 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.435788 4954 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.435802 4954 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.435816 4954 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.435836 4954 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.435861 4954 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.435877 4954 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.435918 4954 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.435934 4954 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.436547 4954 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.437249 4954 server.go:1280] "Started kubelet"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.437993 4954 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.438032 4954 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.439326 4954 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 28 16:10:47 crc systemd[1]: Started Kubernetes Kubelet.
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.439806 4954 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.440348 4954 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.440393 4954 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.440426 4954 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 19:54:30.533101565 +0000 UTC
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.440499 4954 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 363h43m43.092609113s for next certificate rotation
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.440984 4954 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.441002 4954 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.441058 4954 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.441521 4954 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused
Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.441603 4954 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.173:6443: connect: connection refused" logger="UnhandledError"
Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.441715 4954 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.442016 4954 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused" interval="200ms"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.442591 4954 server.go:460] "Adding debug handlers to kubelet server"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.442852 4954 factory.go:55] Registering systemd factory
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.442928 4954 factory.go:221] Registration of the systemd container factory successfully
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.443324 4954 factory.go:153] Registering CRI-O factory
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.443367 4954 factory.go:221] Registration of the crio container factory successfully
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.443477 4954 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.443511 4954 factory.go:103] Registering Raw factory
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.443561 4954 manager.go:1196] Started watching for new ooms in manager
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.444251 4954 manager.go:319] Starting recovery of all containers
Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.443309 4954 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.173:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c37906b499ecf default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 16:10:47.437197007 +0000 UTC m=+0.828865578,LastTimestamp:2025-11-28 16:10:47.437197007 +0000 UTC m=+0.828865578,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461134 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461273 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461296 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461314 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461333 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461405 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461426 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
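Every failure in this window (the reflector list/watch errors, the CSINode wait, the lease controller, and the event post above) points at the same unreachable endpoint: api-int.crc.testing resolving to 38.102.83.173:6443, which is expected while the static-pod API server is still coming up. One way to confirm that from a capture of this journal; a sketch, with kubelet.log as a hypothetical path:

    import re

    pat = re.compile(r'dial tcp (?P<addr>[0-9.]+:\d+): connect: connection refused')

    addrs = {}
    with open('kubelet.log') as f:  # hypothetical capture of this journal
        for line in f:
            m = pat.search(line)
            if m:
                addrs[m['addr']] = addrs.get(m['addr'], 0) + 1

    print(addrs)  # e.g. {'38.102.83.173:6443': 8} -- one endpoint behind every early failure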
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461481 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461500 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461553 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461624 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461650 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461679 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461699 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461717 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461738 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461756 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461776 4954 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461794 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461813 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461848 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461866 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461895 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461955 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.461974 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.462072 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.462152 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.462197 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.462218 4954 reconstruct.go:130] "Volume is marked as uncertain and 
added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.462392 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.462432 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.462469 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.462488 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.462507 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.462564 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.462598 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.462631 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.462667 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.463464 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.463592 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.463860 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.463925 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.464029 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.464064 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.464098 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.464337 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.464357 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.464394 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.466451 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.467281 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.467453 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.467486 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.467508 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.467679 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.467703 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.468724 4954 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.468781 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.468805 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.468827 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.468846 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.468867 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.468885 4954 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.468904 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.468926 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.468948 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.468970 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.468990 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469010 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469029 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469049 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469071 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469119 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469137 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469158 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469177 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469323 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469343 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469366 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469388 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469428 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469448 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469467 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469487 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469507 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469577 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469607 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469632 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469657 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469689 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469718 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469746 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469773 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469791 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469810 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469830 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469853 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469905 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.469922 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470067 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470091 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470109 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470126 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470145 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470164 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470191 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470212 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470235 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470255 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470275 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470298 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470321 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470353 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470376 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470396 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470417 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470436 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470457 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470509 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470553 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470573 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470591 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470609 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470629 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470650 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470668 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470687 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470705 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470726 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" 
volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470745 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470765 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470786 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470806 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470825 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470843 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470862 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470881 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470901 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470951 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470972 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.470992 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471010 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471029 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471050 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471079 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471100 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471121 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471166 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471189 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471212 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471230 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471305 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471324 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471345 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471368 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471390 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471453 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471477 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471501 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471550 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471570 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471589 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471609 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471628 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471654 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471672 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471690 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.471709 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.474896 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.474946 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.474958 4954 manager.go:324] Recovery completed Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.474968 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475134 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475190 4954 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475225 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475253 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475277 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475301 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475324 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475352 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475375 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475399 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475459 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475489 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475515 4954 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475575 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475609 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475637 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475662 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475687 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475714 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475738 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475765 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475791 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475817 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475843 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475870 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475897 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475924 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475952 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.475979 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.476003 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.476029 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.476053 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.476080 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.476110 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.476136 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" 
volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.476162 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.476191 4954 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.476219 4954 reconstruct.go:97] "Volume reconstruction finished" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.476237 4954 reconciler.go:26] "Reconciler: start to sync state" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.486014 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.487919 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.487973 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.487991 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.489202 4954 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.489232 4954 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.489263 4954 state_mem.go:36] "Initialized new in-memory state store" Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.542714 4954 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.642868 4954 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.643816 4954 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused" interval="400ms" Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.743980 4954 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.844375 4954 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.851831 4954 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.854789 4954 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.854851 4954 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.854889 4954 kubelet.go:2335] "Starting kubelet main sync loop" Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.854961 4954 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 28 16:10:47 crc kubenswrapper[4954]: W1128 16:10:47.870775 4954 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.871201 4954 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.173:6443: connect: connection refused" logger="UnhandledError" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.875712 4954 policy_none.go:49] "None policy: Start" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.877569 4954 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.877627 4954 state_mem.go:35] "Initializing new in-memory state store" Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.945208 4954 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.955433 4954 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.960291 4954 manager.go:334] "Starting Device Plugin manager" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.960913 4954 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.960938 4954 server.go:79] "Starting device plugin registration server" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.961396 4954 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.961422 4954 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.962105 4954 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.962201 4954 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 28 16:10:47 crc kubenswrapper[4954]: I1128 16:10:47.962222 4954 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 28 16:10:47 crc kubenswrapper[4954]: E1128 16:10:47.976454 4954 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 16:10:48 crc kubenswrapper[4954]: E1128 16:10:48.045551 4954 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused" interval="800ms" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.062596 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.063957 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.064009 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.064024 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.064055 4954 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 16:10:48 crc kubenswrapper[4954]: E1128 16:10:48.064621 4954 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.173:6443: connect: connection refused" node="crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.156148 4954 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.156263 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.157252 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.157291 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.157303 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.157426 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.157798 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.157869 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.157991 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.158020 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.158032 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.158144 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.158278 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.158321 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.158883 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.158916 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.158929 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.158937 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.158959 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.158971 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.158983 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.159003 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.159013 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.159026 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.159455 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.159479 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.160041 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.160070 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.160084 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.160216 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.160716 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.160754 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.161156 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.161362 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.161159 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.161386 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.161394 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.161374 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.161599 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.161634 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.161688 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.161714 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.161728 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.162371 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.162403 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.162414 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.264683 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.265834 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.265880 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.265898 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.265923 4954 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 16:10:48 crc kubenswrapper[4954]: E1128 16:10:48.266437 4954 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.173:6443: connect: connection refused" node="crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.289079 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.289154 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.289206 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc 
kubenswrapper[4954]: I1128 16:10:48.289299 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.289380 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.289419 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.289447 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.289474 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.289503 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.289599 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.289631 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.289673 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.289790 4954 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.289883 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.289946 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: W1128 16:10:48.308418 4954 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:10:48 crc kubenswrapper[4954]: E1128 16:10:48.308509 4954 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.173:6443: connect: connection refused" logger="UnhandledError" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391496 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391599 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391626 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391651 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391677 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: 
\"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391713 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391740 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391765 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391795 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391822 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391847 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391872 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391894 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391884 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391944 4954 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.392014 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391985 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.392032 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.392088 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.392100 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.392064 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.392107 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.392109 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.392057 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 
16:10:48.392151 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.392214 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.392163 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.391916 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.392287 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.392426 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.441433 4954 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.503487 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.533075 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.556408 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: W1128 16:10:48.567311 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-a1a74c20b099d3dd3cbf08dd40bda18c1655f2eb41be3e933dda895e7484e0f9 WatchSource:0}: Error finding container a1a74c20b099d3dd3cbf08dd40bda18c1655f2eb41be3e933dda895e7484e0f9: Status 404 returned error can't find the container with id a1a74c20b099d3dd3cbf08dd40bda18c1655f2eb41be3e933dda895e7484e0f9 Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.576691 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.580467 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:48 crc kubenswrapper[4954]: W1128 16:10:48.594442 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-75eaa6ec6d12353596c650f630b6a99708d2418b2b37342c34f7f0150d1e3118 WatchSource:0}: Error finding container 75eaa6ec6d12353596c650f630b6a99708d2418b2b37342c34f7f0150d1e3118: Status 404 returned error can't find the container with id 75eaa6ec6d12353596c650f630b6a99708d2418b2b37342c34f7f0150d1e3118 Nov 28 16:10:48 crc kubenswrapper[4954]: W1128 16:10:48.624151 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-d796964b5fc987daccba29059c42fc2061c5ed88d149f5b7acb6ec3f8837c503 WatchSource:0}: Error finding container d796964b5fc987daccba29059c42fc2061c5ed88d149f5b7acb6ec3f8837c503: Status 404 returned error can't find the container with id d796964b5fc987daccba29059c42fc2061c5ed88d149f5b7acb6ec3f8837c503 Nov 28 16:10:48 crc kubenswrapper[4954]: W1128 16:10:48.626484 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-7a1251c2243729da801bf2e0c8d69cbd7c8d01aff55904030a6e07e5a3caf52c WatchSource:0}: Error finding container 7a1251c2243729da801bf2e0c8d69cbd7c8d01aff55904030a6e07e5a3caf52c: Status 404 returned error can't find the container with id 7a1251c2243729da801bf2e0c8d69cbd7c8d01aff55904030a6e07e5a3caf52c Nov 28 16:10:48 crc kubenswrapper[4954]: W1128 16:10:48.629040 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-0626f97723f5056d942aec4aad3a4456c987460034645f45ed3de589720a6362 WatchSource:0}: Error finding container 0626f97723f5056d942aec4aad3a4456c987460034645f45ed3de589720a6362: Status 404 returned error can't find the container with id 0626f97723f5056d942aec4aad3a4456c987460034645f45ed3de589720a6362 Nov 28 16:10:48 crc kubenswrapper[4954]: W1128 16:10:48.661569 4954 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:10:48 crc kubenswrapper[4954]: E1128 16:10:48.661676 4954 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.173:6443: connect: connection refused" logger="UnhandledError" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.667071 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.668928 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.668971 4954 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.668986 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.669013 4954 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 16:10:48 crc kubenswrapper[4954]: E1128 16:10:48.669541 4954 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.173:6443: connect: connection refused" node="crc" Nov 28 16:10:48 crc kubenswrapper[4954]: W1128 16:10:48.820581 4954 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:10:48 crc kubenswrapper[4954]: E1128 16:10:48.820695 4954 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.173:6443: connect: connection refused" logger="UnhandledError" Nov 28 16:10:48 crc kubenswrapper[4954]: W1128 16:10:48.840066 4954 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:10:48 crc kubenswrapper[4954]: E1128 16:10:48.840215 4954 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.173:6443: connect: connection refused" logger="UnhandledError" Nov 28 16:10:48 crc kubenswrapper[4954]: E1128 16:10:48.847063 4954 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused" interval="1.6s" Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.861805 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"75eaa6ec6d12353596c650f630b6a99708d2418b2b37342c34f7f0150d1e3118"} Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.863365 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"a1a74c20b099d3dd3cbf08dd40bda18c1655f2eb41be3e933dda895e7484e0f9"} Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.865191 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"0626f97723f5056d942aec4aad3a4456c987460034645f45ed3de589720a6362"} Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.866400 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"7a1251c2243729da801bf2e0c8d69cbd7c8d01aff55904030a6e07e5a3caf52c"} Nov 28 16:10:48 crc kubenswrapper[4954]: I1128 16:10:48.868040 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d796964b5fc987daccba29059c42fc2061c5ed88d149f5b7acb6ec3f8837c503"} Nov 28 16:10:49 crc kubenswrapper[4954]: I1128 16:10:49.441379 4954 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:10:49 crc kubenswrapper[4954]: I1128 16:10:49.470680 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:49 crc kubenswrapper[4954]: I1128 16:10:49.473129 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:49 crc kubenswrapper[4954]: I1128 16:10:49.473185 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:49 crc kubenswrapper[4954]: I1128 16:10:49.473203 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:49 crc kubenswrapper[4954]: I1128 16:10:49.473237 4954 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 16:10:49 crc kubenswrapper[4954]: E1128 16:10:49.473924 4954 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.173:6443: connect: connection refused" node="crc" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.441027 4954 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:10:50 crc kubenswrapper[4954]: E1128 16:10:50.448750 4954 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused" interval="3.2s" Nov 28 16:10:50 crc kubenswrapper[4954]: W1128 16:10:50.545149 4954 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:10:50 crc kubenswrapper[4954]: E1128 16:10:50.545251 4954 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.173:6443: connect: connection refused" logger="UnhandledError" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.876658 4954 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107" exitCode=0 Nov 28 16:10:50 crc 
kubenswrapper[4954]: I1128 16:10:50.876794 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107"} Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.876841 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.878209 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.878283 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.878308 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.879506 4954 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="11db6a01b20b7c530007b4dd86193150ebcb6229017cd42f9cae4ed9672fc5e3" exitCode=0 Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.879585 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"11db6a01b20b7c530007b4dd86193150ebcb6229017cd42f9cae4ed9672fc5e3"} Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.880444 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.880599 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.882323 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.882374 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.882392 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.882777 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.882824 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.882891 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.883160 4954 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="4b760bb4479062363e578bfb1df320735d51bd18bcf5147e5f09747b09546673" exitCode=0 Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.883262 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.883273 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" 
event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"4b760bb4479062363e578bfb1df320735d51bd18bcf5147e5f09747b09546673"} Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.885215 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.885291 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.885319 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.891673 4954 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8" exitCode=0 Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.891820 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8"} Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.891848 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.893127 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.893176 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.893195 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:50 crc kubenswrapper[4954]: I1128 16:10:50.894271 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464"} Nov 28 16:10:51 crc kubenswrapper[4954]: W1128 16:10:51.019101 4954 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:10:51 crc kubenswrapper[4954]: E1128 16:10:51.019202 4954 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.173:6443: connect: connection refused" logger="UnhandledError" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.074069 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.075127 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.075177 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc 
kubenswrapper[4954]: I1128 16:10:51.075191 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.075222 4954 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 16:10:51 crc kubenswrapper[4954]: E1128 16:10:51.075773 4954 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.173:6443: connect: connection refused" node="crc" Nov 28 16:10:51 crc kubenswrapper[4954]: W1128 16:10:51.134277 4954 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:10:51 crc kubenswrapper[4954]: E1128 16:10:51.134347 4954 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.173:6443: connect: connection refused" logger="UnhandledError" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.440902 4954 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:10:51 crc kubenswrapper[4954]: W1128 16:10:51.507883 4954 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:10:51 crc kubenswrapper[4954]: E1128 16:10:51.507972 4954 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.173:6443: connect: connection refused" logger="UnhandledError" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.903821 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0"} Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.903872 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de"} Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.903887 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc"} Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.903968 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.905627 4954 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.905661 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.905676 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.907699 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532"} Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.907738 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523"} Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.909784 4954 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="a5b63618fe32efede7129d09e2be2a1160cefb0910ab24ec261649746ff6c653" exitCode=0 Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.909840 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"a5b63618fe32efede7129d09e2be2a1160cefb0910ab24ec261649746ff6c653"} Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.909966 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.912032 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.912067 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.912079 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.915244 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"5b6ba9eaa7660cf70b083a47af130e08fd2635c2b2c2a69a9302bf8284cb3280"} Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.915455 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.916879 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.916922 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.916936 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.921887 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"ca5e399c8bfc6ffc2a7b28afb56ad29ad48bbdb07306055d3e14187688654d94"} Nov 28 16:10:51 crc kubenswrapper[4954]: I1128 16:10:51.921925 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"6ba1da465ad6f3ae487388090264325459e3799cfad00abae0a0835f2dafb3a1"} Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.444038 4954 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:10:52 crc kubenswrapper[4954]: E1128 16:10:52.540143 4954 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.173:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c37906b499ecf default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 16:10:47.437197007 +0000 UTC m=+0.828865578,LastTimestamp:2025-11-28 16:10:47.437197007 +0000 UTC m=+0.828865578,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.934166 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e2df18602cb127759049cbea162ca2ddf4e21c1713183a7fcdf62ca781a5bfac"} Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.934217 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36"} Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.934228 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515"} Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.934472 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.936141 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.936178 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.936192 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.937928 4954 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="f0e45ad2911d8ed39bc3add6de7b1b3281e168ebb3651cc4513be11902699fdf" exitCode=0 Nov 28 16:10:52 crc 
kubenswrapper[4954]: I1128 16:10:52.938036 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"f0e45ad2911d8ed39bc3add6de7b1b3281e168ebb3651cc4513be11902699fdf"} Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.938203 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.939153 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.939192 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.939209 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.949548 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"2bd4993194ad729e8723b91495cec4774b05f0dea94f91df694ab2f53d2c5def"} Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.949649 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.949706 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.949767 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.951315 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.951345 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.951358 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.951847 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.951870 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.951882 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.952378 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.952395 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.952405 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:52 crc kubenswrapper[4954]: I1128 16:10:52.967997 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:53 crc 
kubenswrapper[4954]: I1128 16:10:53.389067 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.441132 4954 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:10:53 crc kubenswrapper[4954]: E1128 16:10:53.650450 4954 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused" interval="6.4s" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.824603 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.953492 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.956201 4954 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e2df18602cb127759049cbea162ca2ddf4e21c1713183a7fcdf62ca781a5bfac" exitCode=255 Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.956277 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"e2df18602cb127759049cbea162ca2ddf4e21c1713183a7fcdf62ca781a5bfac"} Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.956370 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.957656 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.957713 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.957736 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.958694 4954 scope.go:117] "RemoveContainer" containerID="e2df18602cb127759049cbea162ca2ddf4e21c1713183a7fcdf62ca781a5bfac" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.973433 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.974503 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"5c0811323f5225108fc884caaf8772c68fc0b78c2733c29b7478a0932f5bd3f1"} Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.974747 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c9a536c63b5e991b755dcc59c530a9df627798421830320812385f99cd7ea4c6"} Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.975042 4954 kubelet_node_status.go:401] "Setting node annotation to 
enable volume controller attach/detach" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.976040 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.977823 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.978012 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.978147 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.979575 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.980361 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:53 crc kubenswrapper[4954]: I1128 16:10:53.980701 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.206331 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.276370 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.277635 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.277664 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.277675 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.277701 4954 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.978882 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.980954 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3"} Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.981072 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.981247 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.982215 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.982243 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:54 crc 
kubenswrapper[4954]: I1128 16:10:54.982252 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.984742 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ae11691f00b8caaa9e2a9b9f0cb53a8a083a33a11acbfcc20bebfc0ff2dd5b2e"} Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.984788 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"62e6e0f056b85b3f3f1a216913798ac522080ed9a49c8204b457ce927952c706"} Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.984806 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.984810 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6e6c4a5f20c5a103ede81b0361533e7ccd1f682523d7b7e6d2d34a0f9fb5e277"} Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.984940 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.984978 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.985691 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.985752 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.985771 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.986343 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.986356 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.986397 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.986425 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.986398 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:54 crc kubenswrapper[4954]: I1128 16:10:54.986578 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:55 crc kubenswrapper[4954]: I1128 16:10:55.968359 4954 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 16:10:55 crc kubenswrapper[4954]: I1128 16:10:55.968448 4954 prober.go:107] 
"Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:10:55 crc kubenswrapper[4954]: I1128 16:10:55.987262 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:55 crc kubenswrapper[4954]: I1128 16:10:55.987286 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:55 crc kubenswrapper[4954]: I1128 16:10:55.987330 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:55 crc kubenswrapper[4954]: I1128 16:10:55.988467 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:55 crc kubenswrapper[4954]: I1128 16:10:55.988556 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:55 crc kubenswrapper[4954]: I1128 16:10:55.988583 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:55 crc kubenswrapper[4954]: I1128 16:10:55.989029 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:55 crc kubenswrapper[4954]: I1128 16:10:55.989067 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:55 crc kubenswrapper[4954]: I1128 16:10:55.989080 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:56 crc kubenswrapper[4954]: I1128 16:10:56.376793 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 28 16:10:56 crc kubenswrapper[4954]: I1128 16:10:56.991016 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:56 crc kubenswrapper[4954]: I1128 16:10:56.991324 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:56 crc kubenswrapper[4954]: I1128 16:10:56.993176 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:56 crc kubenswrapper[4954]: I1128 16:10:56.993210 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:56 crc kubenswrapper[4954]: I1128 16:10:56.993225 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:56 crc kubenswrapper[4954]: I1128 16:10:56.993298 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:56 crc kubenswrapper[4954]: I1128 16:10:56.993338 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:56 crc kubenswrapper[4954]: I1128 16:10:56.993355 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:10:57 crc kubenswrapper[4954]: I1128 16:10:57.341069 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:10:57 crc kubenswrapper[4954]: E1128 16:10:57.976859 4954 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 16:10:57 crc kubenswrapper[4954]: I1128 16:10:57.992941 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:10:57 crc kubenswrapper[4954]: I1128 16:10:57.994134 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:10:57 crc kubenswrapper[4954]: I1128 16:10:57.994195 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:10:57 crc kubenswrapper[4954]: I1128 16:10:57.994218 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4954]: I1128 16:11:01.195790 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:11:01 crc kubenswrapper[4954]: I1128 16:11:01.196073 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:11:01 crc kubenswrapper[4954]: I1128 16:11:01.197559 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:01 crc kubenswrapper[4954]: I1128 16:11:01.197596 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:01 crc kubenswrapper[4954]: I1128 16:11:01.197607 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:01 crc kubenswrapper[4954]: I1128 16:11:01.521844 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:11:01 crc kubenswrapper[4954]: I1128 16:11:01.903806 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:11:01 crc kubenswrapper[4954]: I1128 16:11:01.910251 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:11:02 crc kubenswrapper[4954]: I1128 16:11:02.004507 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:11:02 crc kubenswrapper[4954]: I1128 16:11:02.006031 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:02 crc kubenswrapper[4954]: I1128 16:11:02.006117 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:02 crc kubenswrapper[4954]: I1128 16:11:02.006145 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:03 crc kubenswrapper[4954]: I1128 16:11:03.006566 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:11:03 crc kubenswrapper[4954]: I1128 16:11:03.009032 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:03 crc kubenswrapper[4954]: I1128 16:11:03.009426 4954 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:03 crc kubenswrapper[4954]: I1128 16:11:03.009454 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:03 crc kubenswrapper[4954]: I1128 16:11:03.825340 4954 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="Get \"https://192.168.126.11:6443/livez\": context deadline exceeded" start-of-body= Nov 28 16:11:03 crc kubenswrapper[4954]: I1128 16:11:03.825429 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez\": context deadline exceeded" Nov 28 16:11:04 crc kubenswrapper[4954]: I1128 16:11:04.207613 4954 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 28 16:11:04 crc kubenswrapper[4954]: I1128 16:11:04.207737 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 28 16:11:04 crc kubenswrapper[4954]: E1128 16:11:04.279235 4954 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc" Nov 28 16:11:04 crc kubenswrapper[4954]: I1128 16:11:04.363891 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 28 16:11:04 crc kubenswrapper[4954]: I1128 16:11:04.364087 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:11:04 crc kubenswrapper[4954]: I1128 16:11:04.365361 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:04 crc kubenswrapper[4954]: I1128 16:11:04.365404 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:04 crc kubenswrapper[4954]: I1128 16:11:04.365416 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:04 crc kubenswrapper[4954]: I1128 16:11:04.406921 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 28 16:11:04 crc kubenswrapper[4954]: I1128 16:11:04.441415 4954 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 28 16:11:05 crc kubenswrapper[4954]: I1128 16:11:05.012199 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:11:05 crc kubenswrapper[4954]: I1128 16:11:05.013773 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:05 crc kubenswrapper[4954]: 
I1128 16:11:05.013845 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:05 crc kubenswrapper[4954]: I1128 16:11:05.013865 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:05 crc kubenswrapper[4954]: I1128 16:11:05.027292 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 28 16:11:05 crc kubenswrapper[4954]: I1128 16:11:05.329408 4954 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 28 16:11:05 crc kubenswrapper[4954]: I1128 16:11:05.329560 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 28 16:11:05 crc kubenswrapper[4954]: I1128 16:11:05.969893 4954 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 16:11:05 crc kubenswrapper[4954]: I1128 16:11:05.969998 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:11:06 crc kubenswrapper[4954]: I1128 16:11:06.015211 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:11:06 crc kubenswrapper[4954]: I1128 16:11:06.016301 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:06 crc kubenswrapper[4954]: I1128 16:11:06.016362 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:06 crc kubenswrapper[4954]: I1128 16:11:06.016383 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:07 crc kubenswrapper[4954]: E1128 16:11:07.978043 4954 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 16:11:08 crc kubenswrapper[4954]: I1128 16:11:08.829623 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:11:08 crc kubenswrapper[4954]: I1128 16:11:08.829855 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:11:08 crc kubenswrapper[4954]: I1128 16:11:08.831428 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:08 crc kubenswrapper[4954]: I1128 16:11:08.831509 4954 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:08 crc kubenswrapper[4954]: I1128 16:11:08.831554 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:08 crc kubenswrapper[4954]: I1128 16:11:08.837101 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:11:09 crc kubenswrapper[4954]: I1128 16:11:09.023441 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:11:09 crc kubenswrapper[4954]: I1128 16:11:09.024718 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:09 crc kubenswrapper[4954]: I1128 16:11:09.024760 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:09 crc kubenswrapper[4954]: I1128 16:11:09.024769 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.291435 4954 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="7s" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.293427 4954 trace.go:236] Trace[1234923862]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 16:10:56.919) (total time: 13374ms): Nov 28 16:11:10 crc kubenswrapper[4954]: Trace[1234923862]: ---"Objects listed" error: 13374ms (16:11:10.293) Nov 28 16:11:10 crc kubenswrapper[4954]: Trace[1234923862]: [13.374170171s] [13.374170171s] END Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.293460 4954 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.294568 4954 trace.go:236] Trace[1789286184]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 16:10:55.958) (total time: 14336ms): Nov 28 16:11:10 crc kubenswrapper[4954]: Trace[1789286184]: ---"Objects listed" error: 14336ms (16:11:10.294) Nov 28 16:11:10 crc kubenswrapper[4954]: Trace[1789286184]: [14.336257649s] [14.336257649s] END Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.294614 4954 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.295641 4954 trace.go:236] Trace[2071834532]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 16:10:57.751) (total time: 12543ms): Nov 28 16:11:10 crc kubenswrapper[4954]: Trace[2071834532]: ---"Objects listed" error: 12543ms (16:11:10.295) Nov 28 16:11:10 crc kubenswrapper[4954]: Trace[2071834532]: [12.543718175s] [12.543718175s] END Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.295685 4954 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.296312 4954 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.298925 4954 trace.go:236] Trace[1397639885]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 16:10:56.994) (total time: 
13304ms): Nov 28 16:11:10 crc kubenswrapper[4954]: Trace[1397639885]: ---"Objects listed" error: 13304ms (16:11:10.298) Nov 28 16:11:10 crc kubenswrapper[4954]: Trace[1397639885]: [13.304512124s] [13.304512124s] END Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.298968 4954 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.381186 4954 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:47686->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.381296 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:47686->192.168.126.11:17697: read: connection reset by peer" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.381877 4954 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.381987 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.382731 4954 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.382844 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.441251 4954 apiserver.go:52] "Watching apiserver" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.535370 4954 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.535768 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"] Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.536221 4954 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.536345 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.536354 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.536472 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.536684 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.537064 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.537103 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.537296 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.537366 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.539281 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.539616 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.540517 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.540644 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.540712 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.540717 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.541090 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.542020 4954 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.542554 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.542683 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.582135 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.592380 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.598223 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.598274 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.598301 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.598323 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.598349 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.598376 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.598401 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.598705 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" 
(OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.598725 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.598780 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.598861 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.598884 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.598883 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.599076 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.599367 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.599484 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.599492 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.599580 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.599607 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.599632 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.599886 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.600041 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.600180 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.600634 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.600711 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.600740 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.600766 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.600792 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.600815 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.600834 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.600865 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.600884 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.600900 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod 
\"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.600917 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.600933 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.600954 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.602071 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.602090 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.602396 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.602441 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.602461 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.602479 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.602494 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.602514 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.603151 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.603189 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.601104 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.605627 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.605707 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.605760 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.601144 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.601173 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.601347 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.601395 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.601575 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606010 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.601605 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.601604 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.602008 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.602044 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.602206 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.602356 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.605839 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.605918 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.605923 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606181 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606213 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606283 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606324 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606362 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606447 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606489 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606562 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606595 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606630 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606664 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606694 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606731 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606776 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606805 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606853 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: 
\"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606886 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606925 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606957 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.606986 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.607017 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.607053 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.607084 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.607112 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.607147 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.607183 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod 
\"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.607211 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.607241 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.607273 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.607661 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.607732 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.607774 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.607808 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.607841 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.608304 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.608364 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.608397 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.608425 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.608454 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.608485 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.608556 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.609182 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.609250 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.608947 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.608640 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.609366 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.609397 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " 
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.609421 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.609466 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.609544 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.609607 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610247 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610292 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610342 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610380 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610409 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610452 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610493 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610521 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610574 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610609 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610642 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod 
\"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610674 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610707 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610864 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611101 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611140 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611162 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611193 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611214 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611235 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611255 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: 
\"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611285 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611305 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611323 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611342 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611371 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611398 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611417 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611435 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611451 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611469 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611582 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611602 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611621 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611650 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611668 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611864 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612048 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612069 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612089 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612109 4954 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612136 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612163 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612198 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612225 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612244 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612264 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612285 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612302 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612323 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: 
\"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612345 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612521 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612913 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612948 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.613019 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.613265 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.613518 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.613618 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.613660 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.615287 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod 
\"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610449 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.615705 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.615634 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.617154 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.617227 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.617257 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.615567 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.617473 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.617519 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.617558 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.617577 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.617594 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.617613 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.618208 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610790 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611036 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611037 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611055 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611095 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611136 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611363 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611481 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611477 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611723 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611751 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.611997 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612206 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612442 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612353 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.612344 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.614562 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.614695 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.614919 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.614961 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.615224 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.615265 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.615352 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.615366 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.615513 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610681 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.610699 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.619975 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.620201 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.620311 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.618521 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.620675 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.620682 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.620757 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.620803 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.620897 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.620941 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.620951 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.620990 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621025 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621050 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621075 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621098 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621124 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621158 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621181 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621194 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: 
"installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621195 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621207 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621260 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621263 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621313 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621500 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621513 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621634 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621914 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621933 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621933 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621977 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.622198 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.622326 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.622627 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.622731 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.623894 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.622751 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.623117 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.623195 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.623961 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.623468 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.621335 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624033 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624119 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624167 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624058 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624214 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624211 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624245 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624233 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624354 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624387 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624420 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624453 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624480 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624507 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624516 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624557 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624587 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624710 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.624967 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.625267 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.625477 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.625652 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.625750 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.625781 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.625808 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.625837 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.625866 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.625893 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.625919 4954 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.625949 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.625981 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626010 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626118 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626148 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626212 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626239 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626270 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626387 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626431 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626561 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626610 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626637 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626664 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626683 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626705 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626738 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626761 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626772 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626767 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626807 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626869 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626869 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626947 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626991 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626987 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.626981 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.627070 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.627183 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.627283 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.627285 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.627154 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.627488 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:11:11.127459438 +0000 UTC m=+24.519128089 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.627661 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.627734 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.627889 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). 
InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.628036 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.628282 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.628329 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.628354 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.628353 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.628471 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.628562 4954 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.628582 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.628825 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.628802 4954 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.629143 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.629226 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.629246 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.629305 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.629327 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.629348 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.629543 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.629560 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.629646 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.629860 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.630015 4954 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.630077 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:11.130060522 +0000 UTC m=+24.521729173 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.630153 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.630237 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-28 16:11:11.130227338 +0000 UTC m=+24.521895979 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.630560 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.630811 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.630876 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.631671 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.631801 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.631829 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.631844 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.631855 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.631865 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" 
DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.631876 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.631880 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.631889 4954 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.631936 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.631951 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.631965 4954 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.631979 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.631992 4954 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632005 4954 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632021 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632035 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632049 4954 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632062 4954 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632075 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632087 4954 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632100 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632112 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632124 4954 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632137 4954 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632149 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632161 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632174 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632187 4954 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632199 4954 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632211 4954 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632223 4954 reconciler_common.go:293] "Volume 
detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632236 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632249 4954 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632263 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632276 4954 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632288 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632301 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632315 4954 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632327 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632340 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632353 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632365 4954 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632379 4954 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632392 4954 
reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632403 4954 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632417 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632430 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632443 4954 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632455 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632467 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632481 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632493 4954 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632506 4954 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632517 4954 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632551 4954 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632565 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632579 4954 reconciler_common.go:293] "Volume detached for volume 
\"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632592 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632605 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632617 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632629 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632641 4954 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632652 4954 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632665 4954 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632677 4954 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632689 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632703 4954 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632714 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632727 4954 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632740 4954 
reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632753 4954 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632765 4954 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632778 4954 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632789 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632801 4954 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632812 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632825 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632839 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632851 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632866 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632879 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632892 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632903 4954 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" 
(UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632915 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632927 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632940 4954 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632953 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632965 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632978 4954 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.632989 4954 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633002 4954 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633013 4954 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633025 4954 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633039 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633051 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633063 4954 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633076 4954 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633089 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633103 4954 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633115 4954 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633128 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633141 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633153 4954 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633166 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633178 4954 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633190 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633202 4954 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633214 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc 
kubenswrapper[4954]: I1128 16:11:10.633227 4954 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633240 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633252 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633265 4954 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633277 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633288 4954 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633300 4954 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633313 4954 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633325 4954 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633346 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633361 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633373 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633386 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.633398 4954 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.636821 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.640898 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.641017 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.641118 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.641581 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.641842 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.642642 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.642785 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.642858 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.643217 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.643350 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.643381 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.643401 4954 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.643429 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.643451 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.643462 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:11.143442527 +0000 UTC m=+24.535111078 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.643465 4954 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.644032 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:11.143506649 +0000 UTC m=+24.535175380 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.644066 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.644783 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.645434 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.646954 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.649644 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.651363 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.651927 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.654796 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). 
InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.654998 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.655380 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.655656 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.656288 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.656455 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.657256 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.657885 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.661092 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.662067 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.662633 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.662744 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.662880 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.662514 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.662976 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.663056 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.663349 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.665393 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.665960 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.666909 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.666949 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.667545 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.667897 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.667945 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.668354 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.668679 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.669817 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.670142 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.670425 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.670557 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.670578 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.671360 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.671083 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.671342 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.671408 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.671592 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.672008 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.672052 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.672806 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.673080 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.673486 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.675540 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.675812 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.675951 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.675968 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.676632 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.679592 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.679698 4954 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.680755 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.681596 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.681625 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.681635 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.681777 4954 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.686030 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.687983 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.690464 4954 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.690566 4954 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.691786 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.691813 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.691824 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.691840 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.691852 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.693125 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.702754 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.707919 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.711624 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.711664 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.711673 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.711691 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.711702 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.723461 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.728138 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.728241 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.728330 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.728418 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.728486 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.733844 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.733915 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734035 4954 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734050 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734063 4954 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734144 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734162 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734176 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734189 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734228 4954 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734028 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734267 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734295 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734311 4954 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734325 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734337 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734349 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734362 4954 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734375 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734386 4954 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734397 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734437 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734449 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734460 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734472 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734484 4954 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734494 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734505 4954 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734516 4954 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734551 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734563 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734574 4954 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734586 4954 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734597 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734608 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734618 4954 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734628 4954 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734639 4954 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734650 4954 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734662 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734674 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734687 4954 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734699 4954 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734711 4954 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734722 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734734 4954 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734746 4954 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734757 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734768 4954 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734782 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734793 4954 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734805 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734818 4954 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734831 4954 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734843 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734855 4954 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734866 4954 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734876 4954 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734888 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734900 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734912 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734924 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734936 4954 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734947 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734959 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734972 4954 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734983 4954 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.734995 4954 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.735006 4954 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.735017 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.735028 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.735040 4954 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.735052 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.735065 4954 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.735077 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.735089 4954 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.735102 4954 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.741785 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.754152 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.754198 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.754210 4954 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.754226 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.754238 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.765268 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.770966 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.771182 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.771251 4954 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.771318 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.771376 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.779430 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:10 crc kubenswrapper[4954]: E1128 16:11:10.779835 4954 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.781411 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.781458 4954 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.781471 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.781489 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.781504 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.850440 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.860098 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.871043 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 16:11:10 crc kubenswrapper[4954]: W1128 16:11:10.871217 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-0cc9ef20a6ff217aa669521a2871bf86c1c56bda3d1be4f3e5fb161862d5aa88 WatchSource:0}: Error finding container 0cc9ef20a6ff217aa669521a2871bf86c1c56bda3d1be4f3e5fb161862d5aa88: Status 404 returned error can't find the container with id 0cc9ef20a6ff217aa669521a2871bf86c1c56bda3d1be4f3e5fb161862d5aa88 Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.891819 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.891930 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.891989 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.892062 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:10 crc kubenswrapper[4954]: I1128 16:11:10.892117 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:10Z","lastTransitionTime":"2025-11-28T16:11:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.003746 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.003788 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.003797 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.003813 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.003822 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.033336 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"9e45135afaebde6a4229e0ddb4758b266dc23c3085607b62f05536d966d8388d"} Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.035350 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.035908 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.037615 4954 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3" exitCode=255 Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.037685 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3"} Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.037754 4954 scope.go:117] "RemoveContainer" containerID="e2df18602cb127759049cbea162ca2ddf4e21c1713183a7fcdf62ca781a5bfac" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.040224 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"8c32565648a0dcf05c3e17fdbfd8d08bbdf641be3ff0031602e63727b64f684c"} Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.042992 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"0cc9ef20a6ff217aa669521a2871bf86c1c56bda3d1be4f3e5fb161862d5aa88"} Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.051879 4954 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.070086 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.079866 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.089367 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.098909 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.105985 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.106031 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.106043 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.106060 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.106072 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.109605 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.137086 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.137174 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.137207 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.137305 4954 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.137362 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:12.137345531 +0000 UTC m=+25.529014072 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.137767 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:11:12.137754755 +0000 UTC m=+25.529423296 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.137850 4954 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.137986 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:12.137954291 +0000 UTC m=+25.529622912 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.150729 4954 scope.go:117] "RemoveContainer" containerID="4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3" Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.151155 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.151312 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.208967 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.209031 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.209041 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.209062 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.209077 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.238599 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.238652 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.238804 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.238826 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.238840 4954 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.238881 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.238931 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.238950 4954 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.238907 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:12.238890154 +0000 UTC m=+25.630558695 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.239126 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:12.23905518 +0000 UTC m=+25.630723761 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.312083 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.312131 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.312140 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.312154 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.312165 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.415252 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.415300 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.415310 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.415338 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.415350 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.518432 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.518507 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.518541 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.518569 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.518586 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.620829 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.620856 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.620864 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.620881 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.620894 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.723388 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.723426 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.723434 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.723448 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.723458 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.826435 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.826474 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.826484 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.826499 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.826512 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.855290 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.855447 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.855573 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:11 crc kubenswrapper[4954]: E1128 16:11:11.855650 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.859985 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.860957 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.862639 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.863488 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.864960 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.865790 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.866607 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.867809 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.868595 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.869828 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.870498 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.871990 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.872680 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.873402 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.874757 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.875434 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.876782 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.877277 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.878026 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.879327 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.879920 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.881196 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.881655 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.882665 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.883070 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.883734 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.884855 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.885321 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" 
path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.886254 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.886713 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.887554 4954 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.887659 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.889264 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.890197 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.890624 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.892290 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.893180 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.894068 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.894691 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.895796 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.896216 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.897196 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" 
path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.897833 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.898784 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.899214 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.900379 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.901003 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.902278 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.902843 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.903815 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.904245 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.905150 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.905692 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.906115 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.929643 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.929681 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.929690 4954 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.929704 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:11 crc kubenswrapper[4954]: I1128 16:11:11.929714 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:11Z","lastTransitionTime":"2025-11-28T16:11:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.032020 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.032053 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.032061 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.032072 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.032081 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.047268 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.050324 4954 scope.go:117] "RemoveContainer" containerID="4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3" Nov 28 16:11:12 crc kubenswrapper[4954]: E1128 16:11:12.050679 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.050808 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd"} Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.052237 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c"} Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.063988 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.075953 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.086936 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.099339 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.110888 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.121158 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.130903 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.134410 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.134441 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.134452 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.134468 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.134479 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.146262 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.146389 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:12 crc kubenswrapper[4954]: E1128 16:11:12.146473 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:11:14.146432733 +0000 UTC m=+27.538101284 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:11:12 crc kubenswrapper[4954]: E1128 16:11:12.146514 4954 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:12 crc kubenswrapper[4954]: E1128 16:11:12.147044 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:14.147022882 +0000 UTC m=+27.538691433 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:12 crc kubenswrapper[4954]: E1128 16:11:12.147241 4954 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:12 crc kubenswrapper[4954]: E1128 16:11:12.147408 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:14.147355913 +0000 UTC m=+27.539024504 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.146553 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.152761 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.168518 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.179156 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.196118 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.207968 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.219567 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.231411 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.237199 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.237252 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.237267 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.237288 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.237303 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.249831 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.249883 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:12 crc kubenswrapper[4954]: E1128 16:11:12.250018 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:12 crc kubenswrapper[4954]: E1128 16:11:12.250039 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:12 crc kubenswrapper[4954]: E1128 16:11:12.250055 4954 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:12 crc kubenswrapper[4954]: E1128 16:11:12.250065 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:12 crc kubenswrapper[4954]: E1128 16:11:12.250099 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:12 crc kubenswrapper[4954]: E1128 16:11:12.250111 4954 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:12 crc kubenswrapper[4954]: E1128 16:11:12.250119 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:14.250101485 +0000 UTC m=+27.641770036 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:12 crc kubenswrapper[4954]: E1128 16:11:12.250171 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:14.250154026 +0000 UTC m=+27.641822567 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.308339 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-87jtn"] Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.308742 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.309607 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-jprxj"] Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.310032 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-fn6fg"] Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.310519 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.310802 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-mg7f9"] Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.310866 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.311034 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.311074 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.311210 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-mg7f9" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.311231 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.311966 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.314647 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.314869 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-wrv7t"] Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.315066 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.315637 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.316060 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.316136 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.316312 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.316391 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.316554 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.316819 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.316852 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.316975 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.318363 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.320046 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.320356 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.321187 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.321958 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 16:11:12 crc 
kubenswrapper[4954]: I1128 16:11:12.322006 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.322021 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.323490 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.328254 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\
"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.339743 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.340478 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.341627 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.341662 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 
16:11:12.341679 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.342500 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.357226 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.370898 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.386797 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.396899 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.407047 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.416334 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.423422 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.438264 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.443973 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.444004 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.444015 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.444030 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.444040 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451574 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9ee83a2e-2cf9-42c3-b884-11307ed972a9-tuning-conf-dir\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451606 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-etc-kubernetes\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451630 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-etc-openvswitch\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451650 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-env-overrides\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451672 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-multus-socket-dir-parent\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451691 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dkp5\" (UniqueName: \"kubernetes.io/projected/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-kube-api-access-2dkp5\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451714 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-system-cni-dir\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 
16:11:12.451734 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-var-lib-kubelet\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451753 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/92ddd1ce-e1c1-4606-8b8d-066aeba50079-rootfs\") pod \"machine-config-daemon-jprxj\" (UID: \"92ddd1ce-e1c1-4606-8b8d-066aeba50079\") " pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451774 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-ovn\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451794 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-cni-bin\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451814 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9ee83a2e-2cf9-42c3-b884-11307ed972a9-cni-binary-copy\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451833 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-systemd-units\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451852 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9ee83a2e-2cf9-42c3-b884-11307ed972a9-cnibin\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451873 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-427wm\" (UniqueName: \"kubernetes.io/projected/92ddd1ce-e1c1-4606-8b8d-066aeba50079-kube-api-access-427wm\") pod \"machine-config-daemon-jprxj\" (UID: \"92ddd1ce-e1c1-4606-8b8d-066aeba50079\") " pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451895 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovnkube-script-lib\") pod \"ovnkube-node-wrv7t\" 
(UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.451919 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9ee83a2e-2cf9-42c3-b884-11307ed972a9-system-cni-dir\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452030 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-os-release\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452078 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-multus-conf-dir\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452110 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-systemd\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452188 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9ee83a2e-2cf9-42c3-b884-11307ed972a9-os-release\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452223 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-multus-daemon-config\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452255 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-run-netns\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452283 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-cni-netd\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452344 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-multus-cni-dir\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452376 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-var-lib-cni-bin\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452411 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/92ddd1ce-e1c1-4606-8b8d-066aeba50079-mcd-auth-proxy-config\") pod \"machine-config-daemon-jprxj\" (UID: \"92ddd1ce-e1c1-4606-8b8d-066aeba50079\") " pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452441 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovnkube-config\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452463 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-slash\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452489 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-var-lib-openvswitch\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452511 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-run-multus-certs\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452551 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/92ddd1ce-e1c1-4606-8b8d-066aeba50079-proxy-tls\") pod \"machine-config-daemon-jprxj\" (UID: \"92ddd1ce-e1c1-4606-8b8d-066aeba50079\") " pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452576 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-kubelet\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452597 4954 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-run-netns\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452618 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/9ee83a2e-2cf9-42c3-b884-11307ed972a9-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452639 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-cni-binary-copy\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452658 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-run-k8s-cni-cncf-io\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452677 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452698 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4b5g\" (UniqueName: \"kubernetes.io/projected/604274ad-4365-4c81-a94f-9a234d4aa997-kube-api-access-j4b5g\") pod \"node-resolver-mg7f9\" (UID: \"604274ad-4365-4c81-a94f-9a234d4aa997\") " pod="openshift-dns/node-resolver-mg7f9" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452753 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-var-lib-cni-multus\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452805 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-log-socket\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452852 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d782w\" (UniqueName: \"kubernetes.io/projected/4fb02adc-75ef-4e63-841d-7fa817cc8da2-kube-api-access-d782w\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452886 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-cnibin\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452915 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-hostroot\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452946 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xklf7\" (UniqueName: \"kubernetes.io/projected/9ee83a2e-2cf9-42c3-b884-11307ed972a9-kube-api-access-xklf7\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452973 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-openvswitch\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.452999 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-node-log\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.453033 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-run-ovn-kubernetes\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.453065 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovn-node-metrics-cert\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.453148 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/604274ad-4365-4c81-a94f-9a234d4aa997-hosts-file\") pod \"node-resolver-mg7f9\" (UID: \"604274ad-4365-4c81-a94f-9a234d4aa997\") " pod="openshift-dns/node-resolver-mg7f9" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.454017 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.463890 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.474133 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.480543 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.488144 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.498731 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin 
routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 
127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.507324 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.522074 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.535074 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.544101 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.545659 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.545683 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.545692 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.545705 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.545713 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554192 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-run-netns\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554229 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-cni-netd\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554256 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9ee83a2e-2cf9-42c3-b884-11307ed972a9-os-release\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554279 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-multus-daemon-config\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554298 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-var-lib-cni-bin\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554318 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/92ddd1ce-e1c1-4606-8b8d-066aeba50079-mcd-auth-proxy-config\") pod \"machine-config-daemon-jprxj\" (UID: \"92ddd1ce-e1c1-4606-8b8d-066aeba50079\") " pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554323 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-run-netns\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554339 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovnkube-config\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554373 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-multus-cni-dir\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 
16:11:12.554383 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-var-lib-cni-bin\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554396 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-slash\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554417 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-var-lib-openvswitch\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554437 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-kubelet\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554442 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9ee83a2e-2cf9-42c3-b884-11307ed972a9-os-release\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554486 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-run-netns\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554497 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-slash\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554509 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-var-lib-openvswitch\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554455 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-run-netns\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554543 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-kubelet\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554584 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-run-multus-certs\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554611 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/92ddd1ce-e1c1-4606-8b8d-066aeba50079-proxy-tls\") pod \"machine-config-daemon-jprxj\" (UID: \"92ddd1ce-e1c1-4606-8b8d-066aeba50079\") " pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554607 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-cni-netd\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554633 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-run-multus-certs\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554627 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-run-k8s-cni-cncf-io\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554696 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554728 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4b5g\" (UniqueName: \"kubernetes.io/projected/604274ad-4365-4c81-a94f-9a234d4aa997-kube-api-access-j4b5g\") pod \"node-resolver-mg7f9\" (UID: \"604274ad-4365-4c81-a94f-9a234d4aa997\") " pod="openshift-dns/node-resolver-mg7f9" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554728 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-multus-cni-dir\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554754 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: 
\"kubernetes.io/configmap/9ee83a2e-2cf9-42c3-b884-11307ed972a9-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554757 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554744 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-run-k8s-cni-cncf-io\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554803 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-cni-binary-copy\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554833 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-var-lib-cni-multus\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554858 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-log-socket\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554898 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d782w\" (UniqueName: \"kubernetes.io/projected/4fb02adc-75ef-4e63-841d-7fa817cc8da2-kube-api-access-d782w\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554920 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-cnibin\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554943 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-hostroot\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554966 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-openvswitch\") pod \"ovnkube-node-wrv7t\" 
(UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.554986 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-node-log\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555005 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-run-ovn-kubernetes\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555025 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovn-node-metrics-cert\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555046 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/604274ad-4365-4c81-a94f-9a234d4aa997-hosts-file\") pod \"node-resolver-mg7f9\" (UID: \"604274ad-4365-4c81-a94f-9a234d4aa997\") " pod="openshift-dns/node-resolver-mg7f9" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555072 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xklf7\" (UniqueName: \"kubernetes.io/projected/9ee83a2e-2cf9-42c3-b884-11307ed972a9-kube-api-access-xklf7\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555094 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-etc-kubernetes\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555116 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-etc-openvswitch\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555136 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-env-overrides\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555168 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9ee83a2e-2cf9-42c3-b884-11307ed972a9-tuning-conf-dir\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " 
pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555190 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-multus-socket-dir-parent\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555195 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-log-socket\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555211 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dkp5\" (UniqueName: \"kubernetes.io/projected/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-kube-api-access-2dkp5\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555231 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-multus-daemon-config\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555238 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-var-lib-kubelet\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555249 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/604274ad-4365-4c81-a94f-9a234d4aa997-hosts-file\") pod \"node-resolver-mg7f9\" (UID: \"604274ad-4365-4c81-a94f-9a234d4aa997\") " pod="openshift-dns/node-resolver-mg7f9" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555269 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/92ddd1ce-e1c1-4606-8b8d-066aeba50079-rootfs\") pod \"machine-config-daemon-jprxj\" (UID: \"92ddd1ce-e1c1-4606-8b8d-066aeba50079\") " pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555276 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-openvswitch\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555285 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-node-log\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555293 4954 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-cnibin\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555298 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-ovn\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555326 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-run-ovn-kubernetes\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555287 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovnkube-config\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555167 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-var-lib-cni-multus\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555357 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-cni-bin\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555341 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-ovn\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555330 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-cni-bin\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555376 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/92ddd1ce-e1c1-4606-8b8d-066aeba50079-mcd-auth-proxy-config\") pod \"machine-config-daemon-jprxj\" (UID: \"92ddd1ce-e1c1-4606-8b8d-066aeba50079\") " pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555402 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-etc-kubernetes\") pod \"multus-87jtn\" (UID: 
\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555406 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-system-cni-dir\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555419 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-multus-socket-dir-parent\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555445 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-system-cni-dir\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555455 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-etc-openvswitch\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555321 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-hostroot\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555464 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/92ddd1ce-e1c1-4606-8b8d-066aeba50079-rootfs\") pod \"machine-config-daemon-jprxj\" (UID: \"92ddd1ce-e1c1-4606-8b8d-066aeba50079\") " pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555459 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-host-var-lib-kubelet\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555476 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-cni-binary-copy\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555598 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9ee83a2e-2cf9-42c3-b884-11307ed972a9-cni-binary-copy\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555616 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/9ee83a2e-2cf9-42c3-b884-11307ed972a9-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555633 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-systemd-units\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555664 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovnkube-script-lib\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555693 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9ee83a2e-2cf9-42c3-b884-11307ed972a9-system-cni-dir\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555721 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9ee83a2e-2cf9-42c3-b884-11307ed972a9-cnibin\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555669 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-systemd-units\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555749 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-427wm\" (UniqueName: \"kubernetes.io/projected/92ddd1ce-e1c1-4606-8b8d-066aeba50079-kube-api-access-427wm\") pod \"machine-config-daemon-jprxj\" (UID: \"92ddd1ce-e1c1-4606-8b8d-066aeba50079\") " pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555762 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9ee83a2e-2cf9-42c3-b884-11307ed972a9-system-cni-dir\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555786 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-systemd\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555789 4954 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9ee83a2e-2cf9-42c3-b884-11307ed972a9-cnibin\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555815 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-os-release\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555838 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-systemd\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555843 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-multus-conf-dir\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555894 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9ee83a2e-2cf9-42c3-b884-11307ed972a9-tuning-conf-dir\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555904 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-os-release\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555925 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-multus-conf-dir\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.555957 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-env-overrides\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.556190 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9ee83a2e-2cf9-42c3-b884-11307ed972a9-cni-binary-copy\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.556298 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovnkube-script-lib\") pod \"ovnkube-node-wrv7t\" 
(UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.560601 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovn-node-metrics-cert\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.561373 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/92ddd1ce-e1c1-4606-8b8d-066aeba50079-proxy-tls\") pod \"machine-config-daemon-jprxj\" (UID: \"92ddd1ce-e1c1-4606-8b8d-066aeba50079\") " pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.572687 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xklf7\" (UniqueName: \"kubernetes.io/projected/9ee83a2e-2cf9-42c3-b884-11307ed972a9-kube-api-access-xklf7\") pod \"multus-additional-cni-plugins-fn6fg\" (UID: \"9ee83a2e-2cf9-42c3-b884-11307ed972a9\") " pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.573944 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-427wm\" (UniqueName: \"kubernetes.io/projected/92ddd1ce-e1c1-4606-8b8d-066aeba50079-kube-api-access-427wm\") pod \"machine-config-daemon-jprxj\" (UID: \"92ddd1ce-e1c1-4606-8b8d-066aeba50079\") " pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.575497 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d782w\" (UniqueName: \"kubernetes.io/projected/4fb02adc-75ef-4e63-841d-7fa817cc8da2-kube-api-access-d782w\") pod \"ovnkube-node-wrv7t\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.581469 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4b5g\" (UniqueName: \"kubernetes.io/projected/604274ad-4365-4c81-a94f-9a234d4aa997-kube-api-access-j4b5g\") pod \"node-resolver-mg7f9\" (UID: \"604274ad-4365-4c81-a94f-9a234d4aa997\") " pod="openshift-dns/node-resolver-mg7f9" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.581894 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dkp5\" (UniqueName: \"kubernetes.io/projected/d5ee5420-ed17-4059-8d54-3b486c2ffd1d-kube-api-access-2dkp5\") pod \"multus-87jtn\" (UID: \"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\") " pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.627642 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-87jtn" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.638408 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" Nov 28 16:11:12 crc kubenswrapper[4954]: W1128 16:11:12.638842 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd5ee5420_ed17_4059_8d54_3b486c2ffd1d.slice/crio-079ceb7d2e59ee46055006fc7e3e05156c311a9c4dfd7efba033123ab77e9e01 WatchSource:0}: Error finding container 079ceb7d2e59ee46055006fc7e3e05156c311a9c4dfd7efba033123ab77e9e01: Status 404 returned error can't find the container with id 079ceb7d2e59ee46055006fc7e3e05156c311a9c4dfd7efba033123ab77e9e01 Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.647602 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-mg7f9" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.648025 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.648051 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.648061 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.648078 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.648089 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.659162 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:11:12 crc kubenswrapper[4954]: W1128 16:11:12.665584 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod604274ad_4365_4c81_a94f_9a234d4aa997.slice/crio-2f451ba7b6348ca7ce1633ee173d035dac8bc45cdd2482ecb2542b0d85b33238 WatchSource:0}: Error finding container 2f451ba7b6348ca7ce1633ee173d035dac8bc45cdd2482ecb2542b0d85b33238: Status 404 returned error can't find the container with id 2f451ba7b6348ca7ce1633ee173d035dac8bc45cdd2482ecb2542b0d85b33238 Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.666604 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.751084 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.751354 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.751364 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.751385 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.751394 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4954]: W1128 16:11:12.755609 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4fb02adc_75ef_4e63_841d_7fa817cc8da2.slice/crio-d61f44f1b0425bbd05df40555cb7051e775d884b06af8a9817d43b9fa6c9d249 WatchSource:0}: Error finding container d61f44f1b0425bbd05df40555cb7051e775d884b06af8a9817d43b9fa6c9d249: Status 404 returned error can't find the container with id d61f44f1b0425bbd05df40555cb7051e775d884b06af8a9817d43b9fa6c9d249 Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.853269 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.853298 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.853307 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.853322 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.853331 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.856248 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:12 crc kubenswrapper[4954]: E1128 16:11:12.856352 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.956457 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.956501 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.956511 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.956550 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.956562 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:12Z","lastTransitionTime":"2025-11-28T16:11:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.973808 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.977662 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.981514 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.986394 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:12Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:12 crc kubenswrapper[4954]: I1128 16:11:12.998644 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:12Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.013916 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.031405 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.054077 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.055549 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" event={"ID":"9ee83a2e-2cf9-42c3-b884-11307ed972a9","Type":"ContainerStarted","Data":"5b04d0c33ff84c1fcc4d45a6c8ef6a1ce13519bed607b052a37f3d04cdca5de6"} Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.057808 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-87jtn" event={"ID":"d5ee5420-ed17-4059-8d54-3b486c2ffd1d","Type":"ContainerStarted","Data":"079ceb7d2e59ee46055006fc7e3e05156c311a9c4dfd7efba033123ab77e9e01"} Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.058303 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.058364 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.058385 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.058409 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.058427 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.064129 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerStarted","Data":"d61f44f1b0425bbd05df40555cb7051e775d884b06af8a9817d43b9fa6c9d249"} Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.067060 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"887850a53122ec0b4ba92a3813b4682501436081d89b9245a50475b59908643d"} Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.069281 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525"} Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.073916 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.074156 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-mg7f9" event={"ID":"604274ad-4365-4c81-a94f-9a234d4aa997","Type":"ContainerStarted","Data":"2f451ba7b6348ca7ce1633ee173d035dac8bc45cdd2482ecb2542b0d85b33238"} Nov 28 16:11:13 crc kubenswrapper[4954]: E1128 16:11:13.082661 4954 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.090225 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.103478 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.121032 4954 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.142056 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.155889 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.160587 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.160632 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.160649 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.160673 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.160690 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.178708 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"
mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.12
6.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.198178 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.221849 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.236316 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.252318 4954 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.262933 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.265679 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.265975 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.266010 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.266032 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.370598 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.370681 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.370707 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.370739 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.370762 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.473358 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.473420 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.473436 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.473460 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.473478 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.576490 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.576615 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.576643 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.576677 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.576704 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.679595 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.679666 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.679684 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.679710 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.679763 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.782006 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.782085 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.782112 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.782141 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.782161 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.855960 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:13 crc kubenswrapper[4954]: E1128 16:11:13.856166 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.856330 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:13 crc kubenswrapper[4954]: E1128 16:11:13.856758 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.872759 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd79
1fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.884894 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.884949 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.884968 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 
16:11:13.884991 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.885009 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.913601 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.934411 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.957354 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.982755 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:13Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.987516 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.987568 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 
16:11:13.987578 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.987593 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:13 crc kubenswrapper[4954]: I1128 16:11:13.987601 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:13Z","lastTransitionTime":"2025-11-28T16:11:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.004269 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:14Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.021156 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:14Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.039393 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:14Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.054512 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:14Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.078491 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" event={"ID":"9ee83a2e-2cf9-42c3-b884-11307ed972a9","Type":"ContainerStarted","Data":"fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923"} Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.079851 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-87jtn" event={"ID":"d5ee5420-ed17-4059-8d54-3b486c2ffd1d","Type":"ContainerStarted","Data":"2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c"} Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.081156 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerStarted","Data":"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043"} Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.089980 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.090023 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.090060 4954 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.090079 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.090093 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.178032 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.178134 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:14 crc kubenswrapper[4954]: E1128 16:11:14.178207 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:11:18.178181306 +0000 UTC m=+31.569849857 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:11:14 crc kubenswrapper[4954]: E1128 16:11:14.178312 4954 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.178327 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:14 crc kubenswrapper[4954]: E1128 16:11:14.178383 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:18.178367222 +0000 UTC m=+31.570035763 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:14 crc kubenswrapper[4954]: E1128 16:11:14.178411 4954 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:14 crc kubenswrapper[4954]: E1128 16:11:14.178548 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:18.178539018 +0000 UTC m=+31.570207559 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.193163 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.193192 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.193201 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.193216 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.193233 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.206778 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.207803 4954 scope.go:117] "RemoveContainer" containerID="4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3" Nov 28 16:11:14 crc kubenswrapper[4954]: E1128 16:11:14.208061 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.279422 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.279518 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:14 crc kubenswrapper[4954]: E1128 16:11:14.279595 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:14 crc kubenswrapper[4954]: E1128 16:11:14.279617 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:14 crc kubenswrapper[4954]: E1128 16:11:14.279627 4954 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:14 crc kubenswrapper[4954]: E1128 16:11:14.279678 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:18.279665717 +0000 UTC m=+31.671334248 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:14 crc kubenswrapper[4954]: E1128 16:11:14.279827 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:14 crc kubenswrapper[4954]: E1128 16:11:14.279898 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:14 crc kubenswrapper[4954]: E1128 16:11:14.279916 4954 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:14 crc kubenswrapper[4954]: E1128 16:11:14.280061 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:18.280033899 +0000 UTC m=+31.671702470 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.295628 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.295672 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.295714 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.295738 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.295757 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.398587 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.398652 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.398666 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.398685 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.398717 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.501751 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.501791 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.501802 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.501822 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.501833 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.604159 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.604211 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.604221 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.604235 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.604243 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.710075 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.710132 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.710149 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.710172 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.710190 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.816566 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.816620 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.816633 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.816652 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.816664 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.855851 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:14 crc kubenswrapper[4954]: E1128 16:11:14.856181 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.919485 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.919512 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.919538 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.919551 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:14 crc kubenswrapper[4954]: I1128 16:11:14.919559 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:14Z","lastTransitionTime":"2025-11-28T16:11:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.025018 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.025092 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.025110 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.025138 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.025190 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.086470 4954 generic.go:334] "Generic (PLEG): container finished" podID="9ee83a2e-2cf9-42c3-b884-11307ed972a9" containerID="fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923" exitCode=0 Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.086619 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" event={"ID":"9ee83a2e-2cf9-42c3-b884-11307ed972a9","Type":"ContainerDied","Data":"fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923"} Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.093762 4954 generic.go:334] "Generic (PLEG): container finished" podID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerID="15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043" exitCode=0 Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.093865 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerDied","Data":"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043"} Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.108973 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.109250 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046"} Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.112237 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c"} Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.116503 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-mg7f9" event={"ID":"604274ad-4365-4c81-a94f-9a234d4aa997","Type":"ContainerStarted","Data":"46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78"} Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.136093 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.136139 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.136197 4954 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.136221 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.136238 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.137381 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.160515 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.180794 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.199307 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.218291 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.237414 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c85
7df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.239794 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.239946 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.240055 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.240178 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.240293 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.252886 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.273249 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.288497 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.333416 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.342611 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.342639 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.342648 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.342661 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.342669 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.354067 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.381107 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.408357 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.420678 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.437942 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.444684 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.444716 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.444727 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.444742 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.444754 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.456152 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.474033 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.487810 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.506105 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.526220 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.544606 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.547210 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.547251 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.547262 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.547279 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.547291 4954 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.565624 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube
rnetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.583054 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc 
kubenswrapper[4954]: I1128 16:11:15.594227 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.608055 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:15Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.650166 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.650218 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.650231 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.650252 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.650265 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.753748 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.753799 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.753815 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.753834 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.753847 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.855445 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.855497 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:15 crc kubenswrapper[4954]: E1128 16:11:15.855604 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:15 crc kubenswrapper[4954]: E1128 16:11:15.855757 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.856392 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.856437 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.856450 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.856469 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.856481 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.959078 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.959129 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.959146 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.959168 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:15 crc kubenswrapper[4954]: I1128 16:11:15.959186 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:15Z","lastTransitionTime":"2025-11-28T16:11:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.061818 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.061877 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.061895 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.061917 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.061935 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.135778 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:16Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.152922 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:16Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.165046 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.165084 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.165095 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.165111 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.165122 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.170107 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:16Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.187707 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:16Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.204914 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:16Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.222684 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:16Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.244785 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host
/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:16Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.258945 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:16Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.268070 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.268135 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.268155 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.268181 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.268200 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.282086 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-
apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:16Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.306178 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:16Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.323387 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:16Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.339515 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:16Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.363419 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller 
ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"}
,{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\
\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:16Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.370857 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.370916 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.370932 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.370955 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.370972 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.473264 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.473304 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.473315 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.473331 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.473342 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.577009 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.577355 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.577373 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.577397 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.577414 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.680079 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.680140 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.680163 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.680190 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.680209 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.783596 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.783640 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.783651 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.783668 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.783680 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.855812 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:16 crc kubenswrapper[4954]: E1128 16:11:16.856026 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.886789 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.886847 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.886865 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.886888 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.886906 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.989626 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.989653 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.989664 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.989680 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:16 crc kubenswrapper[4954]: I1128 16:11:16.989691 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:16Z","lastTransitionTime":"2025-11-28T16:11:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.092733 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.092785 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.092795 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.092808 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.092817 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.124918 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerStarted","Data":"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb"} Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.124957 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerStarted","Data":"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f"} Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.127171 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291"} Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.129506 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" event={"ID":"9ee83a2e-2cf9-42c3-b884-11307ed972a9","Type":"ContainerStarted","Data":"b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667"} Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.157764 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.173892 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.194582 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.196750 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.196804 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.196821 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.196849 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.196868 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.210223 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.229504 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.247374 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.265298 4954 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c
0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.285458 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.298878 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.298914 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.298927 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.298944 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.298955 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.308738 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.325638 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.349259 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc 
kubenswrapper[4954]: I1128 16:11:17.362464 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.382989 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.400988 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.401036 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.401048 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.401065 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.401078 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.410647 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa
41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log
-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\
\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.425179 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.443100 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.458254 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.475616 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.495785 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.502966 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.503010 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.503024 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.503044 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.503056 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.514813 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.535664 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.550554 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.569958 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.591615 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host
/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\
"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.605632 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.605687 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.605701 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.605719 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.605731 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.607785 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.629519 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.708478 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.708550 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.708564 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.708582 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.708593 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.814254 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.814302 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.814318 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.814340 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.814356 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.856152 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:17 crc kubenswrapper[4954]: E1128 16:11:17.856301 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.856391 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:17 crc kubenswrapper[4954]: E1128 16:11:17.856604 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.875926 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.893663 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.909132 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.917582 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.917633 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.917650 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.917677 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.917697 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:17Z","lastTransitionTime":"2025-11-28T16:11:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.931236 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.943390 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.965252 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:17 crc kubenswrapper[4954]: I1128 16:11:17.988674 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.005499 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.019733 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.019799 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.019814 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.019835 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.019847 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.042827 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830
db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.061421 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.076784 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.092419 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.111279 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.122085 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.122150 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.122169 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.122194 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.122211 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.137653 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerStarted","Data":"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa"} Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.137839 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerStarted","Data":"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678"} Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.140091 4954 generic.go:334] "Generic (PLEG): container finished" podID="9ee83a2e-2cf9-42c3-b884-11307ed972a9" containerID="b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667" exitCode=0 Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.140245 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" event={"ID":"9ee83a2e-2cf9-42c3-b884-11307ed972a9","Type":"ContainerDied","Data":"b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667"} Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.157789 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.188447 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z 
is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.206172 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.224764 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.225098 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.225241 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.225375 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.225498 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.227415 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.227590 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.227686 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:18 crc kubenswrapper[4954]: E1128 16:11:18.228227 4954 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:18 crc kubenswrapper[4954]: E1128 16:11:18.228310 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:26.228284648 +0000 UTC m=+39.619953229 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:18 crc kubenswrapper[4954]: E1128 16:11:18.228393 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:11:26.228372241 +0000 UTC m=+39.620040872 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:11:18 crc kubenswrapper[4954]: E1128 16:11:18.228796 4954 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:18 crc kubenswrapper[4954]: E1128 16:11:18.228959 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:26.228916888 +0000 UTC m=+39.620585469 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.233543 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageI
D\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.246354 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.261039 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.273420 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.293820 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageI
D\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.311166 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.328196 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.328400 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.328425 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.328437 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.328455 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4954]: E1128 16:11:18.328465 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.328467 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4954]: E1128 16:11:18.328488 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:18 crc kubenswrapper[4954]: E1128 16:11:18.328505 4954 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:18 crc kubenswrapper[4954]: E1128 16:11:18.328575 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:26.32855786 +0000 UTC m=+39.720226391 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.328589 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:18 crc kubenswrapper[4954]: E1128 16:11:18.328651 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:18 crc kubenswrapper[4954]: E1128 16:11:18.328667 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:18 crc kubenswrapper[4954]: E1128 16:11:18.328678 4954 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:18 crc kubenswrapper[4954]: E1128 16:11:18.328712 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:26.328703234 +0000 UTC m=+39.720371775 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.329867 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.344556 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.366275 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.391784 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.430953 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.430992 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.431002 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.431017 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.431028 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.533965 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.534009 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.534024 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.534041 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.534052 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.637028 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.637085 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.637096 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.637113 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.637124 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.739577 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.739641 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.739658 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.739681 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.739701 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.842661 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.842738 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.842761 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.842792 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.842814 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.855294 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:18 crc kubenswrapper[4954]: E1128 16:11:18.855478 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.946027 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.946090 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.946107 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.946131 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:18 crc kubenswrapper[4954]: I1128 16:11:18.946150 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:18Z","lastTransitionTime":"2025-11-28T16:11:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.049653 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.049720 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.049743 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.049778 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.049799 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.152686 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.152730 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.152746 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.152767 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.152786 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.256819 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.256876 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.256892 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.256919 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.256937 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.359158 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.359208 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.359224 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.359247 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.359263 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.462052 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.462128 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.462153 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.462183 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.462204 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.565350 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.565418 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.565435 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.565458 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.565476 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.668069 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.668113 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.668129 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.668206 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.668226 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.695058 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-vjmwt"] Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.695655 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-vjmwt" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.697650 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.698298 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.700908 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.702213 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.718349 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:19Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.741266 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:19Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.743773 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/50663cb6-2935-4019-83d0-1fd0e3100b8d-host\") pod \"node-ca-vjmwt\" (UID: \"50663cb6-2935-4019-83d0-1fd0e3100b8d\") " pod="openshift-image-registry/node-ca-vjmwt" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.743877 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnfdv\" (UniqueName: \"kubernetes.io/projected/50663cb6-2935-4019-83d0-1fd0e3100b8d-kube-api-access-xnfdv\") pod \"node-ca-vjmwt\" (UID: \"50663cb6-2935-4019-83d0-1fd0e3100b8d\") " pod="openshift-image-registry/node-ca-vjmwt" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.744035 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: 
\"kubernetes.io/configmap/50663cb6-2935-4019-83d0-1fd0e3100b8d-serviceca\") pod \"node-ca-vjmwt\" (UID: \"50663cb6-2935-4019-83d0-1fd0e3100b8d\") " pod="openshift-image-registry/node-ca-vjmwt" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.762431 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-1
1-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:19Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.780760 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.780836 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.780860 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.780893 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.780917 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.783720 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:19Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.799839 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:19Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.822977 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:19Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.838306 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:19Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.845372 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnfdv\" (UniqueName: \"kubernetes.io/projected/50663cb6-2935-4019-83d0-1fd0e3100b8d-kube-api-access-xnfdv\") pod \"node-ca-vjmwt\" (UID: \"50663cb6-2935-4019-83d0-1fd0e3100b8d\") " pod="openshift-image-registry/node-ca-vjmwt" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.845415 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/50663cb6-2935-4019-83d0-1fd0e3100b8d-serviceca\") pod \"node-ca-vjmwt\" (UID: \"50663cb6-2935-4019-83d0-1fd0e3100b8d\") " pod="openshift-image-registry/node-ca-vjmwt" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.845462 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/50663cb6-2935-4019-83d0-1fd0e3100b8d-host\") pod \"node-ca-vjmwt\" (UID: \"50663cb6-2935-4019-83d0-1fd0e3100b8d\") " pod="openshift-image-registry/node-ca-vjmwt" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.845516 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/50663cb6-2935-4019-83d0-1fd0e3100b8d-host\") pod \"node-ca-vjmwt\" (UID: \"50663cb6-2935-4019-83d0-1fd0e3100b8d\") " 
pod="openshift-image-registry/node-ca-vjmwt" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.846440 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/50663cb6-2935-4019-83d0-1fd0e3100b8d-serviceca\") pod \"node-ca-vjmwt\" (UID: \"50663cb6-2935-4019-83d0-1fd0e3100b8d\") " pod="openshift-image-registry/node-ca-vjmwt" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.857855 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:19Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.858849 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.858933 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:19 crc kubenswrapper[4954]: E1128 16:11:19.858987 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:19 crc kubenswrapper[4954]: E1128 16:11:19.859068 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.863872 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnfdv\" (UniqueName: \"kubernetes.io/projected/50663cb6-2935-4019-83d0-1fd0e3100b8d-kube-api-access-xnfdv\") pod \"node-ca-vjmwt\" (UID: \"50663cb6-2935-4019-83d0-1fd0e3100b8d\") " pod="openshift-image-registry/node-ca-vjmwt" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.876057 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:19Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.882600 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.882634 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.882645 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.882659 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.882669 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.905839 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830
db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:19Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.925370 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:19Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.945704 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:19Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.967485 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:19Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.984200 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:19Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.985973 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.986020 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.986034 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.986053 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:19 crc kubenswrapper[4954]: I1128 16:11:19.986066 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:19Z","lastTransitionTime":"2025-11-28T16:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.016313 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-vjmwt" Nov 28 16:11:20 crc kubenswrapper[4954]: W1128 16:11:20.030461 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod50663cb6_2935_4019_83d0_1fd0e3100b8d.slice/crio-69665d347d4e2537e620635db70b49a6fbbb95b95d8dad724cdf40373fe3ebbb WatchSource:0}: Error finding container 69665d347d4e2537e620635db70b49a6fbbb95b95d8dad724cdf40373fe3ebbb: Status 404 returned error can't find the container with id 69665d347d4e2537e620635db70b49a6fbbb95b95d8dad724cdf40373fe3ebbb Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.091042 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.091501 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.091801 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.092171 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.092387 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.155301 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-vjmwt" event={"ID":"50663cb6-2935-4019-83d0-1fd0e3100b8d","Type":"ContainerStarted","Data":"69665d347d4e2537e620635db70b49a6fbbb95b95d8dad724cdf40373fe3ebbb"} Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.158214 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" event={"ID":"9ee83a2e-2cf9-42c3-b884-11307ed972a9","Type":"ContainerStarted","Data":"de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5"} Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.165824 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerStarted","Data":"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974"} Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.196809 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.196879 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.196901 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.196931 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.196954 4954 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.301357 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.301404 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.301414 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.301432 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.301441 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.404678 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.405024 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.405041 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.405063 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.405080 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.508266 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.508322 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.508342 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.508369 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.508390 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.612662 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.612735 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.612757 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.612789 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.612814 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.715671 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.715723 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.715740 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.715763 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.715781 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.818786 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.818856 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.818879 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.818908 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.818930 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.832408 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.832462 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.832480 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.832503 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.832547 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4954]: E1128 16:11:20.851016 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:20Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.855311 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:20 crc kubenswrapper[4954]: E1128 16:11:20.855421 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.855999 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.856019 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.856028 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.856040 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.856051 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4954]: E1128 16:11:20.874139 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeByt
es\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:20Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.881839 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.881906 4954 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.881932 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.881957 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.881976 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4954]: E1128 16:11:20.899148 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:20Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.903408 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.903447 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.903462 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.903481 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.903495 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4954]: E1128 16:11:20.918764 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:20Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.922845 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.922886 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.922897 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.922914 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.922926 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:20 crc kubenswrapper[4954]: E1128 16:11:20.940693 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:20Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:20 crc kubenswrapper[4954]: E1128 16:11:20.940918 4954 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.942671 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.942718 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.942735 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.942758 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:20 crc kubenswrapper[4954]: I1128 16:11:20.942774 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:20Z","lastTransitionTime":"2025-11-28T16:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.045114 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.045175 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.045192 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.045218 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.045236 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.148218 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.148278 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.148294 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.148317 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.148333 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.174664 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerStarted","Data":"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f"}
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.177067 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-vjmwt" event={"ID":"50663cb6-2935-4019-83d0-1fd0e3100b8d","Type":"ContainerStarted","Data":"ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805"}
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.180727 4954 generic.go:334] "Generic (PLEG): container finished" podID="9ee83a2e-2cf9-42c3-b884-11307ed972a9" containerID="de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5" exitCode=0
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.180791 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" event={"ID":"9ee83a2e-2cf9-42c3-b884-11307ed972a9","Type":"ContainerDied","Data":"de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5"}
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.201884 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:21Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.220416 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:21Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.239300 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:21Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.250974 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.251033 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.251049 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.251075 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.251092 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.257985 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:21Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.280735 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:21Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.296441 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:21Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.318610 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:21Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.343218 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:21Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.353781 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.354030 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.354150 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.354209 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.354232 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.366312 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:21Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.389935 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:21Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.419005 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:21Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.438497 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:21Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.459079 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:21Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.459859 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.459916 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.459935 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.459963 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.459982 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.489618 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830
db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:21Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.565946 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.566033 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.566063 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.566096 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.566120 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.675303 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.675378 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.675406 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.675432 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.675449 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.784087 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.784164 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.784186 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.784216 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.784237 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.855385 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.855460 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:11:21 crc kubenswrapper[4954]: E1128 16:11:21.855602 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:11:21 crc kubenswrapper[4954]: E1128 16:11:21.855739 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.886798 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.886851 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.886874 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.886901 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.886922 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.989655 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.989737 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.989755 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.989779 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:21 crc kubenswrapper[4954]: I1128 16:11:21.989796 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:21Z","lastTransitionTime":"2025-11-28T16:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.092709 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.092764 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.092776 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.092797 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.092809 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.196005 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.196078 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.196096 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.196119 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.196138 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.298614 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.298679 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.298696 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.298720 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.298736 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.401979 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.402054 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.402075 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.402104 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.402129 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.505779 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.505839 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.505857 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.505885 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.505907 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.609486 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.609566 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.609584 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.609609 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.609625 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.712308 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.712361 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.712373 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.712391 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.712403 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.815329 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.815384 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.815396 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.815413 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.815424 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.855892 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:11:22 crc kubenswrapper[4954]: E1128 16:11:22.856086 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.918561 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.918619 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.918635 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.918659 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:22 crc kubenswrapper[4954]: I1128 16:11:22.918676 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:22Z","lastTransitionTime":"2025-11-28T16:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.021180 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.021282 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.021303 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.021326 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.021343 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.124429 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.124480 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.124495 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.124517 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.124562 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.195490 4954 generic.go:334] "Generic (PLEG): container finished" podID="9ee83a2e-2cf9-42c3-b884-11307ed972a9" containerID="738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd" exitCode=0 Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.195588 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" event={"ID":"9ee83a2e-2cf9-42c3-b884-11307ed972a9","Type":"ContainerDied","Data":"738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd"} Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.203573 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerStarted","Data":"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134"} Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.218790 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.227219 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.227257 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.227274 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.227298 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.227314 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.250327 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830
db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.272938 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.294689 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.316218 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.330656 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.333877 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.333934 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.333953 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.333978 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.333997 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.344853 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.413799 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.426479 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\"
 for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.436403 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.436477 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.436489 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.436512 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.436544 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.445000 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.459257 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.472923 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.485330 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.496146 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.507291 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.519777 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.540654 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.540717 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.540736 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.540764 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.540785 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.545557 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830
db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.559567 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\
\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.584059 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.609495 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.629697 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.646563 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.646614 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.646626 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.646646 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.646662 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.647102 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.663806 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.678733 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},
{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.694107 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.713520 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.736619 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.749637 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.749702 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.749715 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.749745 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.749757 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.757210 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:23Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.851649 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.851691 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.851699 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.851713 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.851722 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.859238 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.859519 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:23 crc kubenswrapper[4954]: E1128 16:11:23.859831 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:23 crc kubenswrapper[4954]: E1128 16:11:23.860042 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.953778 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.953828 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.953843 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.953862 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:23 crc kubenswrapper[4954]: I1128 16:11:23.953876 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:23Z","lastTransitionTime":"2025-11-28T16:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.057685 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.057740 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.057754 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.057773 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.057786 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.160157 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.160219 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.160236 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.160259 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.160279 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.213608 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" event={"ID":"9ee83a2e-2cf9-42c3-b884-11307ed972a9","Type":"ContainerStarted","Data":"c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b"} Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.235820 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:24Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.254882 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:24Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.264494 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.264558 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.264566 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.264583 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.264601 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.273566 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:24Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.291606 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:24Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.312633 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\
\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:24Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.334929 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:24Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.354217 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:24Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.366971 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.367038 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.367061 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.367092 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.367117 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.376708 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:24Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.401216 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:24Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.418203 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\
",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:24Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.440094 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resource
s\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information 
is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:24Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.455709 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:24Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.469839 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.469900 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.469918 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.469942 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.469962 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.527722 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa
41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log
-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\
\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:24Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.545407 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:24Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.572730 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.572792 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.572809 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.572835 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.572852 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.675849 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.676058 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.676206 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.676332 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.676444 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.779276 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.779339 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.779357 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.779384 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.779402 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.855459 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:11:24 crc kubenswrapper[4954]: E1128 16:11:24.855980 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.885058 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.885097 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.885109 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.885127 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.885141 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.988898 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.988957 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.988974 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.988999 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:24 crc kubenswrapper[4954]: I1128 16:11:24.989015 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:24Z","lastTransitionTime":"2025-11-28T16:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.091914 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.091979 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.091995 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.092017 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.092033 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.194941 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.195005 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.195023 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.195047 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.195072 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.297107 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.297174 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.297192 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.297253 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.297277 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.399480 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.399560 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.399580 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.399604 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.399623 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.502115 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.502171 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.502190 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.502214 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.502233 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.584227 4954 generic.go:334] "Generic (PLEG): container finished" podID="9ee83a2e-2cf9-42c3-b884-11307ed972a9" containerID="c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b" exitCode=0
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.584319 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" event={"ID":"9ee83a2e-2cf9-42c3-b884-11307ed972a9","Type":"ContainerDied","Data":"c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b"}
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.599927 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.606435 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.606491 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.606506 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.606547 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.606574 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.614424 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.630055 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.646341 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.661346 4954 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c
0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.677813 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.693822 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.709102 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.709133 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.709142 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.709155 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.709163 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.712340 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabl
ed\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.731207 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.743155 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.761517 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.776923 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.795688 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.807146 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.811131 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.811167 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.811178 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.811195 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.811206 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.829321 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk"] Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.830220 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.833698 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.833946 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.843625 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.855958 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.856054 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:25 crc kubenswrapper[4954]: E1128 16:11:25.856497 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.856060 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:25 crc kubenswrapper[4954]: E1128 16:11:25.857099 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.868014 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.881353 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.897375 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\
\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.910358 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.914152 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.914197 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.914214 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.914239 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.914257 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:25Z","lastTransitionTime":"2025-11-28T16:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.926986 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.929510 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1bcb37cf-0813-4df2-9f5b-dfab430a96db-env-overrides\") pod \"ovnkube-control-plane-749d76644c-gx7hk\" (UID: \"1bcb37cf-0813-4df2-9f5b-dfab430a96db\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.929659 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1bcb37cf-0813-4df2-9f5b-dfab430a96db-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-gx7hk\" (UID: \"1bcb37cf-0813-4df2-9f5b-dfab430a96db\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.929726 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54m8b\" (UniqueName: \"kubernetes.io/projected/1bcb37cf-0813-4df2-9f5b-dfab430a96db-kube-api-access-54m8b\") pod \"ovnkube-control-plane-749d76644c-gx7hk\" 
(UID: \"1bcb37cf-0813-4df2-9f5b-dfab430a96db\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.929772 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1bcb37cf-0813-4df2-9f5b-dfab430a96db-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-gx7hk\" (UID: \"1bcb37cf-0813-4df2-9f5b-dfab430a96db\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.942750 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":tru
e,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.962018 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:25 crc kubenswrapper[4954]: I1128 16:11:25.981631 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:25.999949 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:25Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.012077 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.016459 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.016481 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.016490 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.016502 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.016510 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.025222 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.031032 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1bcb37cf-0813-4df2-9f5b-dfab430a96db-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-gx7hk\" (UID: \"1bcb37cf-0813-4df2-9f5b-dfab430a96db\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.031126 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54m8b\" (UniqueName: 
\"kubernetes.io/projected/1bcb37cf-0813-4df2-9f5b-dfab430a96db-kube-api-access-54m8b\") pod \"ovnkube-control-plane-749d76644c-gx7hk\" (UID: \"1bcb37cf-0813-4df2-9f5b-dfab430a96db\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.031170 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1bcb37cf-0813-4df2-9f5b-dfab430a96db-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-gx7hk\" (UID: \"1bcb37cf-0813-4df2-9f5b-dfab430a96db\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.031229 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1bcb37cf-0813-4df2-9f5b-dfab430a96db-env-overrides\") pod \"ovnkube-control-plane-749d76644c-gx7hk\" (UID: \"1bcb37cf-0813-4df2-9f5b-dfab430a96db\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.031731 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1bcb37cf-0813-4df2-9f5b-dfab430a96db-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-gx7hk\" (UID: \"1bcb37cf-0813-4df2-9f5b-dfab430a96db\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.034340 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1bcb37cf-0813-4df2-9f5b-dfab430a96db-env-overrides\") pod \"ovnkube-control-plane-749d76644c-gx7hk\" (UID: \"1bcb37cf-0813-4df2-9f5b-dfab430a96db\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.041017 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1bcb37cf-0813-4df2-9f5b-dfab430a96db-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-gx7hk\" (UID: \"1bcb37cf-0813-4df2-9f5b-dfab430a96db\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.051042 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.061790 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54m8b\" (UniqueName: \"kubernetes.io/projected/1bcb37cf-0813-4df2-9f5b-dfab430a96db-kube-api-access-54m8b\") pod \"ovnkube-control-plane-749d76644c-gx7hk\" (UID: \"1bcb37cf-0813-4df2-9f5b-dfab430a96db\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.068665 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.119021 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.119071 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.119088 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.119110 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.119126 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.147066 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" Nov 28 16:11:26 crc kubenswrapper[4954]: W1128 16:11:26.165225 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1bcb37cf_0813_4df2_9f5b_dfab430a96db.slice/crio-6d8c417fae357e2fa5a87a2a7b0e6214bec987e0921ac8a4789269da3ceb7470 WatchSource:0}: Error finding container 6d8c417fae357e2fa5a87a2a7b0e6214bec987e0921ac8a4789269da3ceb7470: Status 404 returned error can't find the container with id 6d8c417fae357e2fa5a87a2a7b0e6214bec987e0921ac8a4789269da3ceb7470 Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.223605 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.223643 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.223663 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.223692 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.223715 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.233041 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.233273 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:11:42.233249483 +0000 UTC m=+55.624918054 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.233320 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.233381 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.233516 4954 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.233596 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:42.233583004 +0000 UTC m=+55.625251575 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.234140 4954 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.234268 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:42.234234686 +0000 UTC m=+55.625903257 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.327304 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.327355 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.327369 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.327386 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.327399 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.334838 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.334902 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.335040 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.335083 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.335045 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.335102 4954 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.335114 4954 projected.go:288] Couldn't get 
configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.335131 4954 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.335165 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:42.335145738 +0000 UTC m=+55.726814289 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.335188 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:42.335177969 +0000 UTC m=+55.726846520 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.430797 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.430858 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.430876 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.430901 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.430918 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.535416 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.535659 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.535687 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.535722 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.535749 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.589067 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-mrxnm"] Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.589548 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.589616 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.589738 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" event={"ID":"1bcb37cf-0813-4df2-9f5b-dfab430a96db","Type":"ContainerStarted","Data":"6d8c417fae357e2fa5a87a2a7b0e6214bec987e0921ac8a4789269da3ceb7470"} Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.598054 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerStarted","Data":"6942fe14c9cf3d038065a54216f9e78509b70b432836ab35515fd62d96aa0a1f"} Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.610000 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/mu
ltus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.629459 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.638051 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.638092 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.638104 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.638121 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.638132 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.642440 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.659156 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.677718 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.696443 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.710377 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.721780 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.736273 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.738869 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4fx5\" (UniqueName: \"kubernetes.io/projected/19107b9e-d11e-4360-a6c8-dfc9e96a8623-kube-api-access-r4fx5\") pod \"network-metrics-daemon-mrxnm\" (UID: \"19107b9e-d11e-4360-a6c8-dfc9e96a8623\") " pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.738956 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs\") pod \"network-metrics-daemon-mrxnm\" (UID: \"19107b9e-d11e-4360-a6c8-dfc9e96a8623\") " pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.740948 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 
16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.740986 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.740997 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.741017 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.741029 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.754099 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.785673 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z 
is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.801697 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.823162 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.839988 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4fx5\" (UniqueName: \"kubernetes.io/projected/19107b9e-d11e-4360-a6c8-dfc9e96a8623-kube-api-access-r4fx5\") pod \"network-metrics-daemon-mrxnm\" (UID: \"19107b9e-d11e-4360-a6c8-dfc9e96a8623\") " pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.840067 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs\") pod \"network-metrics-daemon-mrxnm\" (UID: \"19107b9e-d11e-4360-a6c8-dfc9e96a8623\") " pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.840258 4954 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.840342 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs podName:19107b9e-d11e-4360-a6c8-dfc9e96a8623 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:27.340319648 +0000 UTC m=+40.731988229 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs") pod "network-metrics-daemon-mrxnm" (UID: "19107b9e-d11e-4360-a6c8-dfc9e96a8623") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.844305 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.844330 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.844339 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.844354 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.844372 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.845866 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.855543 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:26 crc kubenswrapper[4954]: E1128 16:11:26.855694 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.863087 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.869282 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4fx5\" (UniqueName: \"kubernetes.io/projected/19107b9e-d11e-4360-a6c8-dfc9e96a8623-kube-api-access-r4fx5\") pod \"network-metrics-daemon-mrxnm\" (UID: \"19107b9e-d11e-4360-a6c8-dfc9e96a8623\") " pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.882588 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"re
adOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:26Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.948201 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.948268 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.948286 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.948313 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:26 crc kubenswrapper[4954]: I1128 16:11:26.948335 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:26Z","lastTransitionTime":"2025-11-28T16:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.050808 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.050886 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.050911 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.050942 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.050965 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.154104 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.154157 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.154205 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.154237 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.154260 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.257477 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.257949 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.257970 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.257998 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.258017 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.346661 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs\") pod \"network-metrics-daemon-mrxnm\" (UID: \"19107b9e-d11e-4360-a6c8-dfc9e96a8623\") " pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:27 crc kubenswrapper[4954]: E1128 16:11:27.346837 4954 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:27 crc kubenswrapper[4954]: E1128 16:11:27.346926 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs podName:19107b9e-d11e-4360-a6c8-dfc9e96a8623 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:28.346904665 +0000 UTC m=+41.738573226 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs") pod "network-metrics-daemon-mrxnm" (UID: "19107b9e-d11e-4360-a6c8-dfc9e96a8623") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.359816 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.359860 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.359874 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.359895 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.359911 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.463930 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.463996 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.464013 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.464039 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.464057 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.567162 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.567220 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.567237 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.567260 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.567277 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.606757 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" event={"ID":"9ee83a2e-2cf9-42c3-b884-11307ed972a9","Type":"ContainerStarted","Data":"a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766"} Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.670253 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.670332 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.670355 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.670382 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.670400 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.774687 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.774764 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.774790 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.774823 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.774846 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.856251 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.856261 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.856400 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:27 crc kubenswrapper[4954]: E1128 16:11:27.856608 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:27 crc kubenswrapper[4954]: E1128 16:11:27.856759 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:27 crc kubenswrapper[4954]: E1128 16:11:27.858201 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.878678 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325
7453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:27Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.879380 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.879411 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.879422 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.879440 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.879451 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.898227 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:27Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.917758 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:27Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.939090 4954 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:27Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.963857 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:27Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.982131 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.982258 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.982279 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.982307 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.982368 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:27Z","lastTransitionTime":"2025-11-28T16:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:27 crc kubenswrapper[4954]: I1128 16:11:27.985646 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:27Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.006632 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.030601 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/
host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"sta
rted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.046957 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.065114 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.086017 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.086752 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.086820 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.086843 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.086872 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.086894 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.103430 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.124209 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.156410 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.176036 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.191369 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.191413 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.191429 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.191455 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.191475 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.208121 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830
db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:28Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.295071 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.295326 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.295454 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.295637 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.295782 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.363292 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs\") pod \"network-metrics-daemon-mrxnm\" (UID: \"19107b9e-d11e-4360-a6c8-dfc9e96a8623\") " pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:28 crc kubenswrapper[4954]: E1128 16:11:28.363674 4954 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:28 crc kubenswrapper[4954]: E1128 16:11:28.363796 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs podName:19107b9e-d11e-4360-a6c8-dfc9e96a8623 nodeName:}" failed. 
No retries permitted until 2025-11-28 16:11:30.36376735 +0000 UTC m=+43.755435931 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs") pod "network-metrics-daemon-mrxnm" (UID: "19107b9e-d11e-4360-a6c8-dfc9e96a8623") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.399219 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.399278 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.399301 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.399329 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.399350 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.501686 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.501747 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.501769 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.501800 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.501820 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.605330 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.605580 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.605721 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.605858 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.605986 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.709044 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.709302 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.709470 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.709742 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.710092 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.813944 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.814008 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.814028 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.814058 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.814079 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.855579 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:28 crc kubenswrapper[4954]: E1128 16:11:28.855782 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.916851 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.916905 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.916924 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.916954 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:28 crc kubenswrapper[4954]: I1128 16:11:28.916978 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:28Z","lastTransitionTime":"2025-11-28T16:11:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.019315 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.019372 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.019395 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.019422 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.019441 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.122435 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.122506 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.122574 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.122605 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.122626 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.225553 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.225601 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.225614 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.225635 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.225648 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.328473 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.328605 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.328625 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.328647 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.328668 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.431498 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.431588 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.431608 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.431631 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.431647 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.535934 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.535990 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.536006 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.536030 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.536047 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.639033 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.639090 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.639108 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.639130 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.639148 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.742459 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.742514 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.742563 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.742585 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.742602 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.845649 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.845712 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.845734 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.845765 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.845790 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.855158 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:29 crc kubenswrapper[4954]: E1128 16:11:29.855351 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.855461 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.855479 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:29 crc kubenswrapper[4954]: E1128 16:11:29.855624 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:29 crc kubenswrapper[4954]: E1128 16:11:29.855890 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.856799 4954 scope.go:117] "RemoveContainer" containerID="4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.949220 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.949303 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.949322 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.949350 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:29 crc kubenswrapper[4954]: I1128 16:11:29.949370 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:29Z","lastTransitionTime":"2025-11-28T16:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.056508 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.056631 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.056660 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.056695 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.056733 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.160215 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.160293 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.160313 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.160346 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.160407 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.263988 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.264099 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.264118 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.264147 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.264166 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.368330 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.368405 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.368429 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.368462 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.368486 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.385449 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs\") pod \"network-metrics-daemon-mrxnm\" (UID: \"19107b9e-d11e-4360-a6c8-dfc9e96a8623\") " pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:30 crc kubenswrapper[4954]: E1128 16:11:30.385900 4954 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:30 crc kubenswrapper[4954]: E1128 16:11:30.386060 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs podName:19107b9e-d11e-4360-a6c8-dfc9e96a8623 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:34.386018914 +0000 UTC m=+47.777687495 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs") pod "network-metrics-daemon-mrxnm" (UID: "19107b9e-d11e-4360-a6c8-dfc9e96a8623") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.471773 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.471857 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.471877 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.471908 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.471934 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.575054 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.575120 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.575145 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.575175 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.575198 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.619194 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.619268 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.638779 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.654735 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.671406 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.678712 4954 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.678755 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.678772 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.678791 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.678806 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.688421 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.704416 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.722338 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.742030 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.766033 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/
host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"sta
rted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.781700 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 
crc kubenswrapper[4954]: I1128 16:11:30.781757 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.781772 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.781794 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.781808 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.781779 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.802917 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.818157 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.832785 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.845351 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.856257 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:30 crc kubenswrapper[4954]: E1128 16:11:30.856470 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.861591 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.881949 4954 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.884660 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.884726 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.884750 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.884782 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.884809 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.911897 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"i
mageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6942fe14c9cf3d038065a54216f9e78509b70b432836ab35515fd62d96aa0a1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":
\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:30Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.987825 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.987874 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:30 crc kubenswrapper[4954]: 
I1128 16:11:30.987886 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.987905 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:30 crc kubenswrapper[4954]: I1128 16:11:30.987919 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:30Z","lastTransitionTime":"2025-11-28T16:11:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.090832 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.090905 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.090930 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.090954 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.090971 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.179894 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.179993 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.191104 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.191162 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.191173 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.191194 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.191209 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.197053 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: E1128 16:11:31.208932 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient 
memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\
\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\
":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.213654 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.213714 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.213733 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.213757 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.213774 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.225306 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: E1128 16:11:31.242840 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.247953 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.248014 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.248032 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.248087 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.248107 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.257740 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6942fe14c9cf3d038065a54216f9e78509b70b43
2836ab35515fd62d96aa0a1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: E1128 16:11:31.267355 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed2
1\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.271325 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4954]: 
I1128 16:11:31.271387 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.271406 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.271433 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.271450 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.273324 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: E1128 16:11:31.291987 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.295063 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.315056 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.335675 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.353879 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.376940 4954 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.385118 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.385450 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.385517 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.385598 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.385692 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.404554 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: E1128 16:11:31.407491 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: E1128 16:11:31.407761 4954 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.410001 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.410037 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.410053 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.410077 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.410093 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.418769 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.438348 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d
7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.456988 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.476655 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.493484 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.506684 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.512229 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.512255 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.512263 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.512277 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.512288 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.519616 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.533391 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.552853 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6942fe14c9cf3d038065a54216f9e78509b70b43
2836ab35515fd62d96aa0a1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.569454 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.583659 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.605167 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.621338 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.621394 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.621411 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.621438 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.621456 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.627390 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.631014 4954 generic.go:334] "Generic (PLEG): container finished" podID="9ee83a2e-2cf9-42c3-b884-11307ed972a9" containerID="a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766" exitCode=0 Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.631119 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" event={"ID":"9ee83a2e-2cf9-42c3-b884-11307ed972a9","Type":"ContainerDied","Data":"a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766"} Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.633418 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" event={"ID":"1bcb37cf-0813-4df2-9f5b-dfab430a96db","Type":"ContainerStarted","Data":"407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be"} Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.633491 4954 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.644773 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.674443 4954 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.699465 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountP
ath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.711038 4954 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.723779 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.723823 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.723838 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.723883 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.723901 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.725164 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc47
8274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.741288 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.760416 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.776421 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.799155 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.812337 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.825947 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.827145 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.827193 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.827206 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.827226 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.827275 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.838377 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.854514 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\
":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.855486 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.855492 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:31 crc kubenswrapper[4954]: E1128 16:11:31.856110 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.856392 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:31 crc kubenswrapper[4954]: E1128 16:11:31.859691 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:31 crc kubenswrapper[4954]: E1128 16:11:31.859858 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.864160 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.880404 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.894482 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.904465 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.916627 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.927963 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.929850 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.929889 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.929901 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.929917 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.929930 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:31Z","lastTransitionTime":"2025-11-28T16:11:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:31 crc kubenswrapper[4954]: I1128 16:11:31.967473 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:31Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.003987 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6942fe14c9cf3d038065a54216f9e78509b70b432836ab35515fd62d96aa0a1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.017458 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.030308 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.032372 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.032393 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.032400 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.032412 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.032439 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.041325 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.054008 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:32Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.134903 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.135002 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.135022 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.135077 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.135098 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.238387 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.238428 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.238440 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.238455 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.238466 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.340323 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.340363 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.340374 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.340393 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.340405 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.443369 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.443427 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.443444 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.443462 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.443474 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.546754 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.546806 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.546822 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.546847 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.546866 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.639046 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.641644 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c"} Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.641742 4954 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.649299 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.649353 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.649398 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.649422 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.649439 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.751944 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.752000 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.752016 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.752035 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.752047 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.855105 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.855166 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.855210 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.855226 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.855251 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4954]: E1128 16:11:32.855252 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.855268 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.958217 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.958259 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.958275 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.958296 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:32 crc kubenswrapper[4954]: I1128 16:11:32.958312 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:32Z","lastTransitionTime":"2025-11-28T16:11:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.061692 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.061753 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.061772 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.061797 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.061819 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:33Z","lastTransitionTime":"2025-11-28T16:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.164943 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.165024 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.165046 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.165078 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.165099 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:33Z","lastTransitionTime":"2025-11-28T16:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.268310 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.268398 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.268426 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.268514 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.268581 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:33Z","lastTransitionTime":"2025-11-28T16:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.372351 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.372411 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.372429 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.372453 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.372466 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:33Z","lastTransitionTime":"2025-11-28T16:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.477118 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.477181 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.477204 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.477243 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.477268 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:33Z","lastTransitionTime":"2025-11-28T16:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.580791 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.580835 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.580875 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.580899 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.580915 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:33Z","lastTransitionTime":"2025-11-28T16:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.648276 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" event={"ID":"1bcb37cf-0813-4df2-9f5b-dfab430a96db","Type":"ContainerStarted","Data":"f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299"} Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.653035 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" event={"ID":"9ee83a2e-2cf9-42c3-b884-11307ed972a9","Type":"ContainerStarted","Data":"32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06"} Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.653414 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.667294 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.683514 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.683664 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.683724 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.683741 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.683766 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.683788 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:33Z","lastTransitionTime":"2025-11-28T16:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin 
returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.707878 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\
":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209948
2919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6942fe14c9cf3d038065a54216f9e78509b70b432836ab35515fd62d96aa0a1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.725358 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.741210 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.759668 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.776926 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.786675 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.786714 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.786724 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.786743 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.786755 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:33Z","lastTransitionTime":"2025-11-28T16:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.792458 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.812178 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"
name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-1
1-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.826816 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.848871 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.858358 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:33 crc kubenswrapper[4954]: E1128 16:11:33.858472 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.860150 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:33 crc kubenswrapper[4954]: E1128 16:11:33.860234 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.860290 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:33 crc kubenswrapper[4954]: E1128 16:11:33.860358 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.864331 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.884419 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.895082 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.895117 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.895129 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.895146 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.895157 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:33Z","lastTransitionTime":"2025-11-28T16:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.898195 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.912016 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.925059 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.937337 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.953300 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.977145 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6942fe14c9cf3d038065a54216f9e78509b70b43
2836ab35515fd62d96aa0a1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.992050 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:33Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.997884 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.997938 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.997956 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.997982 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:33 crc kubenswrapper[4954]: I1128 16:11:33.998000 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:33Z","lastTransitionTime":"2025-11-28T16:11:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.011849 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:34Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.030353 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:34Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.049121 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:34Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.071352 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:34Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.091877 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:34Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.101435 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.101477 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.101495 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.101516 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.101562 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:34Z","lastTransitionTime":"2025-11-28T16:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.112232 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:34Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.131182 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:34Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.147486 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:34Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.166945 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:34Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.192445 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:34Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.204450 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.204494 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.204510 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.204558 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.204576 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:34Z","lastTransitionTime":"2025-11-28T16:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.218809 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:34Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.237220 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:34Z is after 2025-08-24T17:21:41Z" Nov 28 
16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.308161 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.308215 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.308235 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.308261 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.308282 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:34Z","lastTransitionTime":"2025-11-28T16:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.411367 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.411411 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.411446 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.411466 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.411478 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:34Z","lastTransitionTime":"2025-11-28T16:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.446225 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs\") pod \"network-metrics-daemon-mrxnm\" (UID: \"19107b9e-d11e-4360-a6c8-dfc9e96a8623\") " pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:34 crc kubenswrapper[4954]: E1128 16:11:34.446361 4954 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:34 crc kubenswrapper[4954]: E1128 16:11:34.446410 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs podName:19107b9e-d11e-4360-a6c8-dfc9e96a8623 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:42.446396621 +0000 UTC m=+55.838065162 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs") pod "network-metrics-daemon-mrxnm" (UID: "19107b9e-d11e-4360-a6c8-dfc9e96a8623") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.513423 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.513458 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.513467 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.513481 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.513492 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:34Z","lastTransitionTime":"2025-11-28T16:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.615776 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.615829 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.615848 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.615871 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.615891 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:34Z","lastTransitionTime":"2025-11-28T16:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.718489 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.718548 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.718556 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.718572 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.718581 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:34Z","lastTransitionTime":"2025-11-28T16:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.821243 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.821268 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.821276 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.821289 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.821297 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:34Z","lastTransitionTime":"2025-11-28T16:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.855594 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:34 crc kubenswrapper[4954]: E1128 16:11:34.855698 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.924009 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.924054 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.924067 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.924085 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:34 crc kubenswrapper[4954]: I1128 16:11:34.924097 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:34Z","lastTransitionTime":"2025-11-28T16:11:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.026565 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.026611 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.026624 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.026641 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.026654 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:35Z","lastTransitionTime":"2025-11-28T16:11:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.129130 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.129375 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.129436 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.129495 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.129593 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:35Z","lastTransitionTime":"2025-11-28T16:11:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.232029 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.232088 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.232113 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.232142 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.232166 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:35Z","lastTransitionTime":"2025-11-28T16:11:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.335100 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.335139 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.335151 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.335169 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.335184 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:35Z","lastTransitionTime":"2025-11-28T16:11:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.437271 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.437320 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.437337 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.437359 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.437378 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:35Z","lastTransitionTime":"2025-11-28T16:11:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.539927 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.539951 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.539958 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.539970 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.539979 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:35Z","lastTransitionTime":"2025-11-28T16:11:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.642126 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.642167 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.642184 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.642207 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.642224 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:35Z","lastTransitionTime":"2025-11-28T16:11:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.744709 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.744790 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.744813 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.744842 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.744867 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:35Z","lastTransitionTime":"2025-11-28T16:11:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.848077 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.848147 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.848168 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.848193 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.848212 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:35Z","lastTransitionTime":"2025-11-28T16:11:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.855475 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.855496 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.855510 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:35 crc kubenswrapper[4954]: E1128 16:11:35.855661 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:35 crc kubenswrapper[4954]: E1128 16:11:35.855746 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:35 crc kubenswrapper[4954]: E1128 16:11:35.855823 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.951206 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.951256 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.951274 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.951298 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:35 crc kubenswrapper[4954]: I1128 16:11:35.951316 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:35Z","lastTransitionTime":"2025-11-28T16:11:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.053345 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.053593 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.053720 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.053867 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.053993 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:36Z","lastTransitionTime":"2025-11-28T16:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.157436 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.157775 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.158009 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.158208 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.158358 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:36Z","lastTransitionTime":"2025-11-28T16:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.261376 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.261412 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.261420 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.261433 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.261445 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:36Z","lastTransitionTime":"2025-11-28T16:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.363990 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.364048 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.364065 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.364092 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.364108 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:36Z","lastTransitionTime":"2025-11-28T16:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.466630 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.466691 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.466708 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.466733 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.466760 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:36Z","lastTransitionTime":"2025-11-28T16:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.570325 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.570426 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.570446 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.570513 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.570608 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:36Z","lastTransitionTime":"2025-11-28T16:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.666676 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovnkube-controller/0.log" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.671247 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerDied","Data":"6942fe14c9cf3d038065a54216f9e78509b70b432836ab35515fd62d96aa0a1f"} Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.671287 4954 generic.go:334] "Generic (PLEG): container finished" podID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerID="6942fe14c9cf3d038065a54216f9e78509b70b432836ab35515fd62d96aa0a1f" exitCode=1 Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.672040 4954 scope.go:117] "RemoveContainer" containerID="6942fe14c9cf3d038065a54216f9e78509b70b432836ab35515fd62d96aa0a1f" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.676258 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.676295 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.676313 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.676338 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.676356 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:36Z","lastTransitionTime":"2025-11-28T16:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.691025 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:36Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.709161 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:36Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.743180 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6942fe14c9cf3d038065a54216f9e78509b70b43
2836ab35515fd62d96aa0a1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6942fe14c9cf3d038065a54216f9e78509b70b432836ab35515fd62d96aa0a1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:36Z\\\",\\\"message\\\":\\\"/informers/externalversions/factory.go:141\\\\nI1128 16:11:35.014315 6354 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 16:11:35.014047 6354 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:35.014700 6354 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 16:11:35.014116 6354 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:35.013943 6354 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 16:11:35.015374 6354 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:35.015517 6354 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 16:11:35.015903 6354 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 16:11:35.015989 6354 factory.go:656] Stopping watch factory\\\\nI1128 16:11:35.016011 6354 ovnkube.go:599] Stopped ovnkube\\\\nI1128 
16:11:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:36Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.760052 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:36Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.779510 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:36Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.779752 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.779795 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.779811 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.779895 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.779913 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:36Z","lastTransitionTime":"2025-11-28T16:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.797976 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:36Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.817892 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:36Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.838587 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:36Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.855850 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:36 crc kubenswrapper[4954]: E1128 16:11:36.855999 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.861666 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:36Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.880172 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:36Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.882853 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.882898 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.882915 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.882937 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.882955 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:36Z","lastTransitionTime":"2025-11-28T16:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.902857 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:36Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.920175 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:36Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.937341 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:36Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.961822 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:36Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.983105 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:36Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.988034 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.988171 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.988191 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.988214 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:36 crc kubenswrapper[4954]: I1128 16:11:36.988230 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:36Z","lastTransitionTime":"2025-11-28T16:11:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.003111 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.090891 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.090938 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.090953 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.090974 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.090991 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.193946 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.194006 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.194023 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.194048 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.194069 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.296618 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.296701 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.296726 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.296758 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.296782 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.400132 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.400192 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.400214 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.400242 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.400268 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.503200 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.503263 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.503279 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.503302 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.503321 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.606316 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.606378 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.606396 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.606429 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.606452 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.709711 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.709800 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.709819 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.709846 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.709865 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.813102 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.813160 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.813179 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.813205 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.813222 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.855661 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.855787 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:37 crc kubenswrapper[4954]: E1128 16:11:37.855884 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.855981 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:37 crc kubenswrapper[4954]: E1128 16:11:37.856200 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:37 crc kubenswrapper[4954]: E1128 16:11:37.856383 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.880287 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.902369 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.917213 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.917281 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.917301 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.917327 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.917345 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:37Z","lastTransitionTime":"2025-11-28T16:11:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.923078 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.939459 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.962113 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:37 crc kubenswrapper[4954]: I1128 16:11:37.989197 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T16:11:37Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.001834 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.012880 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 
Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.019485 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.019518 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.019558 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.019624 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.019638 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.024380 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod
\"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.036399 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.056621 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6942fe14c9cf3d038065a54216f9e78509b70b43
2836ab35515fd62d96aa0a1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6942fe14c9cf3d038065a54216f9e78509b70b432836ab35515fd62d96aa0a1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:36Z\\\",\\\"message\\\":\\\"/informers/externalversions/factory.go:141\\\\nI1128 16:11:35.014315 6354 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 16:11:35.014047 6354 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:35.014700 6354 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 16:11:35.014116 6354 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:35.013943 6354 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 16:11:35.015374 6354 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:35.015517 6354 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 16:11:35.015903 6354 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 16:11:35.015989 6354 factory.go:656] Stopping watch factory\\\\nI1128 16:11:35.016011 6354 ovnkube.go:599] Stopped ovnkube\\\\nI1128 
16:11:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.070003 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.085884 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.103723 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.123380 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.123438 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.123455 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.123481 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.123502 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.123702 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.140778 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.226119 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.226184 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.226203 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.226228 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.226247 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.329673 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.329811 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.329838 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.329866 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.329885 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.432900 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.432954 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.432971 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.432996 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.433014 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.536361 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.536433 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.536450 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.536474 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.536490 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.638962 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.639006 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.639022 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.639041 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.639053 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.686018 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovnkube-controller/0.log" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.690646 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerStarted","Data":"1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1"} Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.690831 4954 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.713140 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1df61fc7900d79833d011c10f9db159cb3c0af7f
a8c377ae73bb0cf4e82a35b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6942fe14c9cf3d038065a54216f9e78509b70b432836ab35515fd62d96aa0a1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:36Z\\\",\\\"message\\\":\\\"/informers/externalversions/factory.go:141\\\\nI1128 16:11:35.014315 6354 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 16:11:35.014047 6354 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:35.014700 6354 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 16:11:35.014116 6354 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:35.013943 6354 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 16:11:35.015374 6354 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:35.015517 6354 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 16:11:35.015903 6354 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 16:11:35.015989 6354 factory.go:656] Stopping watch factory\\\\nI1128 16:11:35.016011 6354 ovnkube.go:599] Stopped ovnkube\\\\nI1128 
16:11:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.735887 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.740893 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.740933 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.740944 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.740962 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.740975 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.753257 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.771291 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.787001 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.806589 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.818382 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.830971 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\
\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.843119 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.843163 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.843174 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.843191 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.843204 4954 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.846877 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.855696 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:38 crc kubenswrapper[4954]: E1128 16:11:38.855842 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.862677 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.884301 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.904603 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.915778 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.929790 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.940764 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 
16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.944979 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.945017 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.945029 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.945046 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.945057 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:38Z","lastTransitionTime":"2025-11-28T16:11:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:38 crc kubenswrapper[4954]: I1128 16:11:38.951782 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:38Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.047966 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.047999 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.048007 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.048023 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.048032 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.150651 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.150723 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.150741 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.150770 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.150788 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.253906 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.253965 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.253986 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.254009 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.254028 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.356747 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.356791 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.356806 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.356827 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.356844 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.460679 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.460738 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.460756 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.460780 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.460799 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.563336 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.563397 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.563415 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.563439 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.563454 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.666572 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.666614 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.666626 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.666641 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.666652 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.696001 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovnkube-controller/1.log" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.696846 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovnkube-controller/0.log" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.699504 4954 generic.go:334] "Generic (PLEG): container finished" podID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerID="1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1" exitCode=1 Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.699565 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerDied","Data":"1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1"} Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.699620 4954 scope.go:117] "RemoveContainer" containerID="6942fe14c9cf3d038065a54216f9e78509b70b432836ab35515fd62d96aa0a1f" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.700388 4954 scope.go:117] "RemoveContainer" containerID="1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1" Nov 28 16:11:39 crc kubenswrapper[4954]: E1128 16:11:39.700637 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-wrv7t_openshift-ovn-kubernetes(4fb02adc-75ef-4e63-841d-7fa817cc8da2)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.719346 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.749003 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1df61fc7900d79833d011c10f9db159cb3c0af7f
a8c377ae73bb0cf4e82a35b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6942fe14c9cf3d038065a54216f9e78509b70b432836ab35515fd62d96aa0a1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:36Z\\\",\\\"message\\\":\\\"/informers/externalversions/factory.go:141\\\\nI1128 16:11:35.014315 6354 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 16:11:35.014047 6354 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:35.014700 6354 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 16:11:35.014116 6354 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:35.013943 6354 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 16:11:35.015374 6354 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:35.015517 6354 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 16:11:35.015903 6354 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 16:11:35.015989 6354 factory.go:656] Stopping watch factory\\\\nI1128 16:11:35.016011 6354 ovnkube.go:599] Stopped ovnkube\\\\nI1128 16:11:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"message\\\":\\\"etry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:39.209491 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1128 16:11:39.209504 6622 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:39.209520 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1128 16:11:39.209447 6622 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1128 16:11:39.209583 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:39.209594 6622 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:39.209608 6622 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1128 16:11:39.209649 6622 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch 
crc\\\\nF1128 16:11:39.209663 6622 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\
"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.763202 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.769070 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.769104 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.769112 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.769126 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.769137 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.778716 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.796918 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.814977 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.818146 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.827830 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.830959 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.849942 4954 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.855389 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.855503 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:39 crc kubenswrapper[4954]: E1128 16:11:39.855689 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.855727 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:39 crc kubenswrapper[4954]: E1128 16:11:39.856279 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:39 crc kubenswrapper[4954]: E1128 16:11:39.856392 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.869812 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.871653 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.871694 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.871709 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.871730 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 
16:11:39.871745 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.889918 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.908976 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.935011 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.957584 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T16:11:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.972737 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.974884 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.974938 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.974956 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.974979 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.974998 4954 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:39Z","lastTransitionTime":"2025-11-28T16:11:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:39 crc kubenswrapper[4954]: I1128 16:11:39.991860 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"i
p\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:39Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.014373 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.032387 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.050579 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.069664 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.077907 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.077962 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.077980 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.078004 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.078021 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.089472 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.105238 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.128388 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.152618 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.169969 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"334428a9-a587-48cd-837c-f2551b88391d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ba1da465ad6f3ae487388090264325459e3799cfad00abae0a0835f2dafb3a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5e399c8bfc6ffc2a7b28afb56ad29ad48bbdb07306055d3e14187688654d94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd4993194ad729e8723b91495cec4774b05f0dea94f91df694ab2f53d2c5def\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.180125 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.180190 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.180214 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.180245 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 
16:11:40.180269 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.187904 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.207142 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.228253 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.250682 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.267280 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.282859 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.283087 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.283131 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.283151 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.283176 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.283195 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.319198 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.364977 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1df61fc7900d79833d011c10f9db159cb3c0af7f
a8c377ae73bb0cf4e82a35b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6942fe14c9cf3d038065a54216f9e78509b70b432836ab35515fd62d96aa0a1f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:36Z\\\",\\\"message\\\":\\\"/informers/externalversions/factory.go:141\\\\nI1128 16:11:35.014315 6354 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 16:11:35.014047 6354 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:35.014700 6354 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 16:11:35.014116 6354 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:35.013943 6354 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 16:11:35.015374 6354 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 16:11:35.015517 6354 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 16:11:35.015903 6354 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 16:11:35.015989 6354 factory.go:656] Stopping watch factory\\\\nI1128 16:11:35.016011 6354 ovnkube.go:599] Stopped ovnkube\\\\nI1128 16:11:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"message\\\":\\\"etry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:39.209491 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1128 16:11:39.209504 6622 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:39.209520 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1128 16:11:39.209447 6622 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1128 16:11:39.209583 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:39.209594 6622 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:39.209608 6622 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1128 16:11:39.209649 6622 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch 
crc\\\\nF1128 16:11:39.209663 6622 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\
"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.376997 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:40Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.385681 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.385719 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.385733 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.385752 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.385768 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.488567 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.488632 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.488649 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.488673 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.488696 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.591585 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.591657 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.591681 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.591708 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.591764 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.695408 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.695469 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.695489 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.695514 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.695557 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.705677 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovnkube-controller/1.log" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.798103 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.798184 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.798216 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.798252 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.798274 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.855793 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:40 crc kubenswrapper[4954]: E1128 16:11:40.855994 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.901348 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.901413 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.901438 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.901469 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:40 crc kubenswrapper[4954]: I1128 16:11:40.901490 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:40Z","lastTransitionTime":"2025-11-28T16:11:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.004932 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.005008 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.005032 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.005064 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.005091 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.112718 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.112785 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.112807 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.112832 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.112850 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.217078 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.217138 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.217155 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.217178 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.217198 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.320846 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.320905 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.320926 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.320956 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.320976 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.424284 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.424355 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.424378 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.424408 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.424432 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.527455 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.527512 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.527560 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.527584 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.527603 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.630708 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.630780 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.630805 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.630836 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.630859 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.684251 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.685655 4954 scope.go:117] "RemoveContainer" containerID="1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1" Nov 28 16:11:41 crc kubenswrapper[4954]: E1128 16:11:41.685981 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-wrv7t_openshift-ovn-kubernetes(4fb02adc-75ef-4e63-841d-7fa817cc8da2)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.704678 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.727339 4954 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.734014 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.734074 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.734098 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.734129 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.734168 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.748418 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.767922 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.783122 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.783444 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.783784 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.783945 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.784114 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.786489 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: E1128 16:11:41.804755 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0
cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.808566 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernet
es\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.810438 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.810492 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.810510 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.810562 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.810585 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4954]: E1128 16:11:41.831724 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.835311 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.837034 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.837073 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc 
kubenswrapper[4954]: I1128 16:11:41.837090 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.837114 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.837132 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.853007 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.855194 4954 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.855382 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:41 crc kubenswrapper[4954]: E1128 16:11:41.855734 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.855787 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:41 crc kubenswrapper[4954]: E1128 16:11:41.855902 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:41 crc kubenswrapper[4954]: E1128 16:11:41.856023 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:41 crc kubenswrapper[4954]: E1128 16:11:41.859965 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.864584 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.864628 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.864647 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.864668 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.864684 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.880378 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: E1128 16:11:41.885011 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.890124 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.890183 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.890200 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.890223 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.890241 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.905279 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: E1128 16:11:41.914878 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: E1128 16:11:41.915129 4954 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.920281 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.920574 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.920808 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.920996 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.921181 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:41Z","lastTransitionTime":"2025-11-28T16:11:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.931175 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"334428a9-a587-48cd-837c-f2551b88391d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ba1da465ad6f3ae487388090264325459e3799cfad00abae0a0835f2dafb3a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5e399c8bfc6ffc2a7b28afb56ad29ad48bbdb07306055d3e14187688654d94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMoun
ts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd4993194ad729e8723b91495cec4774b05f0dea94f91df694ab2f53d2c5def\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.954434 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.972969 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 
16:11:41 crc kubenswrapper[4954]: I1128 16:11:41.989128 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:41Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.007952 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.025152 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.025211 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.025230 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.025256 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.025274 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.039974 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"message\\\":\\\"etry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:39.209491 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1128 16:11:39.209504 6622 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:39.209520 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1128 16:11:39.209447 6622 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1128 16:11:39.209583 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:39.209594 6622 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:39.209608 6622 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1128 16:11:39.209649 6622 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1128 16:11:39.209663 6622 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wrv7t_openshift-ovn-kubernetes(4fb02adc-75ef-4e63-841d-7fa817cc8da2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.057865 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:42Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.129807 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.129865 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.129884 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.129910 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.129929 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.233472 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.233602 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.233625 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.233659 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.233681 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.329268 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:14.329210783 +0000 UTC m=+87.720879364 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.329290 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.329794 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.329941 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.330009 4954 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object 
"openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.330100 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:12:14.330076011 +0000 UTC m=+87.721744742 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.330178 4954 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.330266 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:12:14.330242196 +0000 UTC m=+87.721910777 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.337019 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.337090 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.337114 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.337148 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.337172 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.431611 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.431748 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.431917 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.431939 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.431960 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.431969 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.431981 4954 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.431990 4954 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.432079 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:12:14.432043868 +0000 UTC m=+87.823712499 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.432120 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:12:14.43210206 +0000 UTC m=+87.823770781 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.444484 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.444578 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.444600 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.444666 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.444689 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.532866 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs\") pod \"network-metrics-daemon-mrxnm\" (UID: \"19107b9e-d11e-4360-a6c8-dfc9e96a8623\") " pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.533109 4954 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.533252 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs podName:19107b9e-d11e-4360-a6c8-dfc9e96a8623 nodeName:}" failed. No retries permitted until 2025-11-28 16:11:58.533221679 +0000 UTC m=+71.924890260 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs") pod "network-metrics-daemon-mrxnm" (UID: "19107b9e-d11e-4360-a6c8-dfc9e96a8623") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.547896 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.547948 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.547962 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.547982 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.547996 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.651210 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.651272 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.651291 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.651318 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.651335 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.754170 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.754218 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.754235 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.754260 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.754278 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.855510 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:42 crc kubenswrapper[4954]: E1128 16:11:42.855774 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.857427 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.857492 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.857513 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.857649 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.857680 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.961126 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.961204 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.961224 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.961247 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:42 crc kubenswrapper[4954]: I1128 16:11:42.961264 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:42Z","lastTransitionTime":"2025-11-28T16:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.064841 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.064950 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.064975 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.065005 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.065023 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.170208 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.170271 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.170289 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.170320 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.170339 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.273746 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.273830 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.273851 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.273880 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.273900 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.377475 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.377576 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.377596 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.377622 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.377644 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.481194 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.481252 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.481266 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.481289 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.481309 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.584750 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.584817 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.584828 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.584850 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.584863 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.688792 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.688862 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.688880 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.688912 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.688932 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.792203 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.792260 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.792275 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.792298 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.792319 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.856042 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.856189 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.856252 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:43 crc kubenswrapper[4954]: E1128 16:11:43.856336 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:43 crc kubenswrapper[4954]: E1128 16:11:43.856745 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:43 crc kubenswrapper[4954]: E1128 16:11:43.856864 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.896118 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.896181 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.896199 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.896225 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:43 crc kubenswrapper[4954]: I1128 16:11:43.896243 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:43Z","lastTransitionTime":"2025-11-28T16:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:43.999930 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:43.999990 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.000006 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.000031 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.000049 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.102793 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.102846 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.102863 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.102886 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.102904 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.205677 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.205753 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.205776 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.205806 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.205828 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.308149 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.308202 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.308219 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.308241 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.308257 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.411318 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.411387 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.411412 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.411442 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.411466 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.514513 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.515580 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.515763 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.515895 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.516015 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.619757 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.619813 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.619824 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.619843 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.619862 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.722628 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.722902 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.723043 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.723170 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.723309 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.826630 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.826684 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.826703 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.826730 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.826747 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.855168 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:44 crc kubenswrapper[4954]: E1128 16:11:44.855345 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.929956 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.930013 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.930034 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.930109 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:44 crc kubenswrapper[4954]: I1128 16:11:44.930168 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:44Z","lastTransitionTime":"2025-11-28T16:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.033739 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.033807 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.033828 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.033855 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.033877 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.136948 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.137018 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.137045 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.137077 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.137100 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.240321 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.240396 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.240417 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.240446 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.240468 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.343606 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.343680 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.343697 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.343723 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.343744 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.446850 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.446899 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.446917 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.446939 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.446958 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.550229 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.550300 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.550324 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.550355 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.550377 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.653410 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.653870 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.654012 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.654150 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.654313 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.757748 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.757817 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.757840 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.757868 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.757890 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.856064 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:45 crc kubenswrapper[4954]: E1128 16:11:45.856311 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.856432 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.856650 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:45 crc kubenswrapper[4954]: E1128 16:11:45.856950 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:45 crc kubenswrapper[4954]: E1128 16:11:45.856739 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.861215 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.861297 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.861315 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.861337 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.861354 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.966023 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.966094 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.966115 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.966140 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:45 crc kubenswrapper[4954]: I1128 16:11:45.966158 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:45Z","lastTransitionTime":"2025-11-28T16:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.068444 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.068503 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.068521 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.068596 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.068618 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.173053 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.173494 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.173755 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.174583 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.174648 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.279707 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.280094 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.280353 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.280522 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.280699 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.384497 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.384593 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.384614 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.384638 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.384656 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.488289 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.488345 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.488362 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.488385 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.488402 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.591284 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.591350 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.591367 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.591394 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.591413 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.695195 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.695249 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.695265 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.695291 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.695308 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.798610 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.798677 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.798694 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.798719 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.798738 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.855106 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:46 crc kubenswrapper[4954]: E1128 16:11:46.855291 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.902385 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.902497 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.902568 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.902605 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:46 crc kubenswrapper[4954]: I1128 16:11:46.902627 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:46Z","lastTransitionTime":"2025-11-28T16:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.006350 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.006684 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.006778 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.006865 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.006942 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.110815 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.110901 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.110926 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.110957 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.110980 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.218406 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.218474 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.218494 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.218520 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.218563 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.322051 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.322119 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.322137 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.322160 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.322177 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.425263 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.425332 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.425349 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.425376 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.425392 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.528269 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.528371 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.528387 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.528411 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.528428 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.631561 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.632338 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.632513 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.632695 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.632825 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.736257 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.736311 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.736328 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.736351 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.736367 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.840296 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.840382 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.840401 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.840425 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.840442 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.855786 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.855895 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.855801 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:47 crc kubenswrapper[4954]: E1128 16:11:47.856056 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:47 crc kubenswrapper[4954]: E1128 16:11:47.856157 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:47 crc kubenswrapper[4954]: E1128 16:11:47.856398 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.876376 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:47Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.900814 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:47Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.933821 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"message\\\":\\\"etry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:39.209491 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1128 16:11:39.209504 6622 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:39.209520 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1128 16:11:39.209447 6622 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1128 16:11:39.209583 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:39.209594 6622 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:39.209608 6622 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1128 16:11:39.209649 6622 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1128 16:11:39.209663 6622 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wrv7t_openshift-ovn-kubernetes(4fb02adc-75ef-4e63-841d-7fa817cc8da2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:47Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.943756 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.943793 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.943825 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.943858 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.943870 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:47Z","lastTransitionTime":"2025-11-28T16:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.952695 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:47Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.972705 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:47Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:47 crc kubenswrapper[4954]: I1128 16:11:47.992916 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:47Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.013131 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.031197 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.047136 4954 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.047186 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.047205 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.047279 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.047299 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.050927 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.067669 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 
2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.087818 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"
started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.106804 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.125317 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"334428a9-a587-48cd-837c-f2551b88391d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ba1da465ad6f3ae487388090264325459e3799cfad00abae0a0835f2dafb3a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5e399c8bfc6ffc2a7b28afb56ad29ad48bbdb07306055d3e14187688654d94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd4993194ad729e8723b91495cec4774b05f0dea94f91df694ab2f53d2c5def\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.147046 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.150056 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.150090 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.150102 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.150117 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.150128 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.167905 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.185568 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.204523 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:48Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.252387 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.252418 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.252429 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.252444 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.252456 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.356341 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.356423 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.356442 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.356484 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.356515 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.460884 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.461280 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.461304 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.461339 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.461362 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.565032 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.565101 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.565118 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.565150 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.565198 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.667753 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.667807 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.667825 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.667849 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.667869 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.770495 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.770587 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.770610 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.770642 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.770672 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.855499 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:48 crc kubenswrapper[4954]: E1128 16:11:48.855763 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.876018 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.876082 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.876101 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.876124 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.876141 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.979521 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.979620 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.979676 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.979708 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:48 crc kubenswrapper[4954]: I1128 16:11:48.979729 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:48Z","lastTransitionTime":"2025-11-28T16:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.082852 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.082918 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.082944 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.082975 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.082997 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.186017 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.186082 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.186123 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.186149 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.186167 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.289658 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.289722 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.289743 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.289768 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.289785 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.392974 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.393043 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.393060 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.393085 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.393104 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.496830 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.496900 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.496911 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.496954 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.496967 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.541964 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.564616 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursi
veReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.587745 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"202
5-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/ope
nshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\
\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.601272 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.601320 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.601334 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.601356 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.601372 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.605746 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.628124 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.648403 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.667173 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"334428a9-a587-48cd-837c-f2551b88391d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ba1da465ad6f3ae487388090264325459e3799cfad00abae0a0835f2dafb3a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5e399c8bfc6ffc2a7b28afb56ad29ad48bbdb07306055d3e14187688654d94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd4993194ad729e8723b91495cec4774b05f0dea94f91df694ab2f53d2c5def\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.692784 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.704211 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.704269 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.704281 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.704301 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.704313 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.706844 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.719972 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 
16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.733476 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.749364 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.769458 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1df61fc7900d79833d011c10f9db159cb3c0af7f
a8c377ae73bb0cf4e82a35b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"message\\\":\\\"etry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:39.209491 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1128 16:11:39.209504 6622 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:39.209520 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1128 16:11:39.209447 6622 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1128 16:11:39.209583 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:39.209594 6622 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:39.209608 6622 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1128 16:11:39.209649 6622 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1128 16:11:39.209663 6622 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wrv7t_openshift-ovn-kubernetes(4fb02adc-75ef-4e63-841d-7fa817cc8da2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.782246 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.800072 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.806487 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.806536 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.806545 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.806558 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.806567 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.818119 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.836265 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.849822 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:49Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.855238 4954 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.855290 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.855238 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:49 crc kubenswrapper[4954]: E1128 16:11:49.855380 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:49 crc kubenswrapper[4954]: E1128 16:11:49.855485 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:49 crc kubenswrapper[4954]: E1128 16:11:49.855602 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.909234 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.909313 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.909330 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.909353 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:49 crc kubenswrapper[4954]: I1128 16:11:49.909367 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:49Z","lastTransitionTime":"2025-11-28T16:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.012064 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.012125 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.012147 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.012175 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.012198 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.114801 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.114899 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.114917 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.114971 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.114991 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.217910 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.217990 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.218017 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.218049 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.218070 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.321752 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.321812 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.321831 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.321855 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.321872 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.425858 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.425921 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.425942 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.425970 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.425987 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.529253 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.529317 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.529336 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.529362 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.529382 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.632500 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.632608 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.632631 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.632660 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.632681 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.735401 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.735475 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.735499 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.735559 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.735586 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.838567 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.838640 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.838661 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.838694 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.838720 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.856140 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:50 crc kubenswrapper[4954]: E1128 16:11:50.856375 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.941752 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.941869 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.941887 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.941911 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:50 crc kubenswrapper[4954]: I1128 16:11:50.941926 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:50Z","lastTransitionTime":"2025-11-28T16:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.044928 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.044974 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.044986 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.045005 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.045020 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.148818 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.148895 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.148917 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.148944 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.148963 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.251831 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.251886 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.251898 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.251916 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.251928 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.354518 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.354607 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.354625 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.354647 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.354673 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.457784 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.457851 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.457870 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.457901 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.457923 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.560513 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.560593 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.560617 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.560684 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.560712 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.663831 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.663924 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.663957 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.663989 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.664014 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.767212 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.767279 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.767373 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.767403 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.767421 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.855465 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.855639 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:51 crc kubenswrapper[4954]: E1128 16:11:51.855731 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.855757 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:51 crc kubenswrapper[4954]: E1128 16:11:51.855923 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:51 crc kubenswrapper[4954]: E1128 16:11:51.856048 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.869570 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.869606 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.869614 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.869626 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.869636 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.971963 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.972002 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.972012 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.972026 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:51 crc kubenswrapper[4954]: I1128 16:11:51.972037 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:51Z","lastTransitionTime":"2025-11-28T16:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.074631 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.074680 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.074696 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.074719 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.074736 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.107438 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.107466 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.107474 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.107485 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.107493 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4954]: E1128 16:11:52.123350 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:52Z is after 
2025-08-24T17:21:41Z" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.127098 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.127120 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.127127 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.127140 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.127151 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4954]: E1128 16:11:52.144079 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:52Z is after 
2025-08-24T17:21:41Z" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.148297 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.148333 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.148348 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.148365 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.148379 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4954]: E1128 16:11:52.167494 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:52Z is after 
2025-08-24T17:21:41Z" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.172170 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.172254 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.172279 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.172310 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.172334 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4954]: E1128 16:11:52.192966 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:52Z is after 
2025-08-24T17:21:41Z" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.196402 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.196438 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.196449 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.196467 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.196478 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4954]: E1128 16:11:52.214053 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:52Z is after 
2025-08-24T17:21:41Z" Nov 28 16:11:52 crc kubenswrapper[4954]: E1128 16:11:52.214421 4954 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.216266 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.216322 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.216340 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.216364 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.216381 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.318367 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.318421 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.318432 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.318451 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.318464 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.421682 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.421724 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.421736 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.421751 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.421762 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.524453 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.524609 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.524631 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.524706 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.524890 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.628354 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.628414 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.628430 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.628454 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.628473 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.730725 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.730771 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.730787 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.730810 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.730830 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.834067 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.834130 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.834146 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.834172 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.834188 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.855712 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:52 crc kubenswrapper[4954]: E1128 16:11:52.855892 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.857031 4954 scope.go:117] "RemoveContainer" containerID="1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.938448 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.938933 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.938954 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.938979 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:52 crc kubenswrapper[4954]: I1128 16:11:52.938997 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:52Z","lastTransitionTime":"2025-11-28T16:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.041843 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.041929 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.041947 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.041971 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.041989 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.145081 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.145109 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.145118 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.145133 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.145146 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.247407 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.247438 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.247447 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.247460 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.247468 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.350117 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.350189 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.350207 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.350235 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.350252 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.452773 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.452821 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.452833 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.452851 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.452864 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.555336 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.555379 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.555387 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.555404 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.555413 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.657403 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.657446 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.657454 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.657472 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.657480 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.759916 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.759957 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.759971 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.760005 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.760015 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.788180 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovnkube-controller/1.log" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.791331 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerStarted","Data":"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8"} Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.791771 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.808008 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.822503 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.848274 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f179a2e992a576b91cba6a657f9cdbedf9a8960
3a9b3994f9877ee7b30dcab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"message\\\":\\\"etry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:39.209491 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1128 16:11:39.209504 6622 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:39.209520 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1128 16:11:39.209447 6622 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1128 16:11:39.209583 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:39.209594 6622 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:39.209608 6622 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1128 16:11:39.209649 6622 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1128 16:11:39.209663 6622 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.856108 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.856126 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.856268 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:53 crc kubenswrapper[4954]: E1128 16:11:53.856358 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:53 crc kubenswrapper[4954]: E1128 16:11:53.856484 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:53 crc kubenswrapper[4954]: E1128 16:11:53.856621 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.861742 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.861796 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.861813 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.861837 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.861854 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.864959 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.884054 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.896515 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.912456 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.925609 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.937129 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.950818 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.964309 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.964333 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.964343 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.964303 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.964356 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.964473 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:53Z","lastTransitionTime":"2025-11-28T16:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.975880 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"334428a9-a587-48cd-837c-f2551b88391d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ba1da465ad6f3ae487388090264325459e3799cfad00abae0a0835f2dafb3a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5e399c8bfc6ffc2a7b28afb56ad29ad48bbdb07306055d3e14187688654d94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd4993194ad729e8723b91495cec4774b05f0dea94f91df694ab2f53d2c5def\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:53 crc kubenswrapper[4954]: I1128 16:11:53.988982 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.000115 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:53Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.012822 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.025873 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.037097 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.067027 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.067061 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.067071 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.067086 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.067095 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.169205 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.169241 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.169249 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.169263 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.169272 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.272018 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.272103 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.272135 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.272166 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.272186 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.374745 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.374808 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.374829 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.374856 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.374876 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.477598 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.477668 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.477685 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.477713 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.477730 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.580613 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.580683 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.580699 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.580721 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.580733 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.684732 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.684809 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.684826 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.684850 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.684867 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.787573 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.787615 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.787624 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.787642 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.787652 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.796326 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovnkube-controller/2.log" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.797307 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovnkube-controller/1.log" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.800276 4954 generic.go:334] "Generic (PLEG): container finished" podID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerID="8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8" exitCode=1 Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.800308 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerDied","Data":"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8"} Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.800339 4954 scope.go:117] "RemoveContainer" containerID="1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.801435 4954 scope.go:117] "RemoveContainer" containerID="8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8" Nov 28 16:11:54 crc kubenswrapper[4954]: E1128 16:11:54.801740 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-wrv7t_openshift-ovn-kubernetes(4fb02adc-75ef-4e63-841d-7fa817cc8da2)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.818080 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.828846 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.840226 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.856026 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:54 crc kubenswrapper[4954]: E1128 16:11:54.856193 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.861549 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47
ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1df61fc7900d79833d011c10f9db159cb3c0af7fa8c377ae73bb0cf4e82a35b1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"message\\\":\\\"etry object setup: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:39.209491 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-operator/iptables-alerter-4ln5h\\\\nI1128 16:11:39.209504 6622 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:39.209520 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1128 16:11:39.209447 6622 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1128 16:11:39.209583 6622 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:39.209594 6622 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1128 16:11:39.209608 6622 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1128 16:11:39.209649 6622 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1128 16:11:39.209663 6622 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:54Z\\\",\\\"message\\\":\\\"er-crc\\\\nI1128 16:11:53.867574 6894 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:53.867379 6894 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-fn6fg in node crc\\\\nI1128 16:11:53.867581 6894 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1128 16:11:53.867590 6894 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-fn6fg after 0 failed attempt(s)\\\\nI1128 16:11:53.867598 6894 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-fn6fg\\\\nI1128 16:11:53.867330 6894 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1128 16:11:53.867615 6894 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:53.867309 6894 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nF1128 16:11:53.867628 6894 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, 
handl\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1
d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.876137 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"qua
y.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.888548 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.890006 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.890028 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.890038 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.890053 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.890065 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.902321 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.919405 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.935914 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"334428a9-a587-48cd-837c-f2551b88391d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ba1da465ad6f3ae487388090264325459e3799cfad00abae0a0835f2dafb3a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5e399c8bfc6ffc2a7b28afb56ad29ad48bbdb07306055d3e14187688654d94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd4993194ad729e8723b91495cec4774b05f0dea94f91df694ab2f53d2c5def\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.952592 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 
2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.965100 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.982994 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.994786 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.994842 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.994860 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.994883 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.994901 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:54Z","lastTransitionTime":"2025-11-28T16:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:54 crc kubenswrapper[4954]: I1128 16:11:54.998395 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:
11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:54Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.012717 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:
12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.026420 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.036730 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.051978 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d28
3846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.097907 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.097974 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.097992 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.098016 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.098036 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.200865 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.200972 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.200996 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.201025 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.201043 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.303327 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.303392 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.303410 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.303435 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.303453 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.406988 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.407040 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.407060 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.407083 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.407101 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.509461 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.509568 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.509587 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.509611 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.509632 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.611674 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.611733 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.611745 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.611760 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.611769 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.720126 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.720173 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.720183 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.720199 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.720212 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.805307 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovnkube-controller/2.log"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.808665 4954 scope.go:117] "RemoveContainer" containerID="8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8"
Nov 28 16:11:55 crc kubenswrapper[4954]: E1128 16:11:55.808801 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-wrv7t_openshift-ovn-kubernetes(4fb02adc-75ef-4e63-841d-7fa817cc8da2)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.823297 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.823336 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.823351 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.823372 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.823389 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.828909 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.847811 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f179a2e992a576b91cba6a657f9cdbedf9a8960
3a9b3994f9877ee7b30dcab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:54Z\\\",\\\"message\\\":\\\"er-crc\\\\nI1128 16:11:53.867574 6894 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:53.867379 6894 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-fn6fg in node crc\\\\nI1128 16:11:53.867581 6894 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1128 16:11:53.867590 6894 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-fn6fg after 0 failed attempt(s)\\\\nI1128 16:11:53.867598 6894 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-fn6fg\\\\nI1128 16:11:53.867330 6894 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1128 16:11:53.867615 6894 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:53.867309 6894 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nF1128 16:11:53.867628 6894 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handl\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wrv7t_openshift-ovn-kubernetes(4fb02adc-75ef-4e63-841d-7fa817cc8da2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.856248 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.856280 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:11:55 crc kubenswrapper[4954]: E1128 16:11:55.856406 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.856702 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm"
Nov 28 16:11:55 crc kubenswrapper[4954]: E1128 16:11:55.856797 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:11:55 crc kubenswrapper[4954]: E1128 16:11:55.857089 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.859098 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.871428 4954 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.885778 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.901149 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.914340 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.925593 4954 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.925678 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.925793 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.925808 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.925829 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.925844 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:55Z","lastTransitionTime":"2025-11-28T16:11:55Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.938066 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.952077 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.966973 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"334428a9-a587-48cd-837c-f2551b88391d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ba1da465ad6f3ae487388090264325459e3799cfad00abae0a0835f2dafb3a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5e399c8bfc6ffc2a7b28afb56ad29ad48bbdb07306055d3e14187688654d94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd4993194ad729e8723b91495cec4774b05f0dea94f91df694ab2f53d2c5def\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.979210 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:55 crc kubenswrapper[4954]: I1128 16:11:55.993380 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:55Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.008456 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:56Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.026208 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T16:11:56Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.028147 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.028210 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.028228 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.028256 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.028276 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.040791 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:56Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.054722 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiv
eReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:56Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.131553 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.131626 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.131647 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.131695 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.131719 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.242658 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.242805 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.242835 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.242864 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.242885 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.346857 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.346935 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.346961 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.346997 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.347020 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.449966 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.450005 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.450016 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.450034 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.450045 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.551771 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.551810 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.551821 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.551839 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.551852 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.654247 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.654296 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.654315 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.654337 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.654355 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.757856 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.757951 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.757978 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.758016 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.758038 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.855715 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:56 crc kubenswrapper[4954]: E1128 16:11:56.855939 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.860877 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.860941 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.860956 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.860981 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.860997 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.963347 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.963398 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.963408 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.963424 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:56 crc kubenswrapper[4954]: I1128 16:11:56.963434 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:56Z","lastTransitionTime":"2025-11-28T16:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.066206 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.066256 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.066274 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.066296 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.066315 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.168400 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.168433 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.168441 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.168453 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.168463 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.271285 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.271347 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.271364 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.271389 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.271407 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.373391 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.373441 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.373460 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.373483 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.373500 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.476541 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.476573 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.476582 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.476596 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.476604 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.579865 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.579923 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.579934 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.579949 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.579958 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.681999 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.682042 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.682053 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.682069 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.682084 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.784820 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.784859 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.784866 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.784880 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.784889 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.856769 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.856818 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.856813 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:11:57 crc kubenswrapper[4954]: E1128 16:11:57.856951 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:57 crc kubenswrapper[4954]: E1128 16:11:57.857055 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:57 crc kubenswrapper[4954]: E1128 16:11:57.857236 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.870339 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1b
f8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:57Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.884516 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:57Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.887686 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.887746 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.887759 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.887775 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.887821 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.901113 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:57Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.914662 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:57Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.929040 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:57Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.944406 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:57Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.967093 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T16:11:57Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.982063 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:57Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.989863 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.989912 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.989929 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.989953 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:57 crc kubenswrapper[4954]: I1128 16:11:57.989971 4954 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:57Z","lastTransitionTime":"2025-11-28T16:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.001829 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\
":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:57Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.017375 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.037115 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"334428a9-a587-48cd-837c-f2551b88391d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ba1da465ad6f3ae487388090264325459e3799cfad00abae0a0835f2dafb3a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5e399c8bfc6ffc2a7b28afb56ad29ad48bbdb07306055d3e14187688654d94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd4993194ad729e8723b91495cec4774b05f0dea94f91df694ab2f53d2c5def\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.053331 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.066434 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:58Z is after 2025-08-24T17:21:41Z" Nov 28 
16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.076789 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.088552 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.093023 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.093098 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.093145 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.093181 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.093207 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.116553 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:54Z\\\",\\\"message\\\":\\\"er-crc\\\\nI1128 16:11:53.867574 6894 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:53.867379 6894 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-fn6fg in node crc\\\\nI1128 16:11:53.867581 6894 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1128 16:11:53.867590 6894 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-fn6fg after 0 failed attempt(s)\\\\nI1128 16:11:53.867598 6894 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-fn6fg\\\\nI1128 16:11:53.867330 6894 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1128 16:11:53.867615 6894 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:53.867309 6894 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nF1128 16:11:53.867628 6894 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handl\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wrv7t_openshift-ovn-kubernetes(4fb02adc-75ef-4e63-841d-7fa817cc8da2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.128813 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:11:58Z is after 2025-08-24T17:21:41Z" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.196684 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.196735 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.196752 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.196776 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.196792 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.298766 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.298962 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.299048 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.299112 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.299184 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.401843 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.401890 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.401903 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.401921 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.401932 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.504882 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.504941 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.504954 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.504968 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.504979 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.607451 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.607810 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.607993 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.608133 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.608261 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.620191 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs\") pod \"network-metrics-daemon-mrxnm\" (UID: \"19107b9e-d11e-4360-a6c8-dfc9e96a8623\") " pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:58 crc kubenswrapper[4954]: E1128 16:11:58.620387 4954 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:58 crc kubenswrapper[4954]: E1128 16:11:58.620462 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs podName:19107b9e-d11e-4360-a6c8-dfc9e96a8623 nodeName:}" failed. No retries permitted until 2025-11-28 16:12:30.620443133 +0000 UTC m=+104.012111684 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs") pod "network-metrics-daemon-mrxnm" (UID: "19107b9e-d11e-4360-a6c8-dfc9e96a8623") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.710792 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.710836 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.710847 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.710864 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.710873 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.813480 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.813564 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.813584 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.813608 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.813625 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.855250 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:11:58 crc kubenswrapper[4954]: E1128 16:11:58.855467 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.915899 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.915974 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.915993 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.916017 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:58 crc kubenswrapper[4954]: I1128 16:11:58.916035 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:58Z","lastTransitionTime":"2025-11-28T16:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.018607 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.018690 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.018711 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.018740 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.018760 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.121656 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.121695 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.121703 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.121718 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.121727 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.226597 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.226660 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.226680 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.226707 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.226728 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.328790 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.328831 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.328846 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.328863 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.328875 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.431107 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.431165 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.431182 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.431205 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.431221 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.533324 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.533390 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.533402 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.533419 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.533432 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.635506 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.635560 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.635569 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.635584 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.635595 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.737839 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.737896 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.737913 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.737937 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.737957 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.840724 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.840784 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.840801 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.840825 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.840842 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.856091 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.856156 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:11:59 crc kubenswrapper[4954]: E1128 16:11:59.856271 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.856297 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm"
Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:11:59 crc kubenswrapper[4954]: E1128 16:11:59.856437 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:11:59 crc kubenswrapper[4954]: E1128 16:11:59.856561 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.943775 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.943852 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.943870 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.943894 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:11:59 crc kubenswrapper[4954]: I1128 16:11:59.943910 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:11:59Z","lastTransitionTime":"2025-11-28T16:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.046301 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.046360 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.046380 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.046404 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.046423 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.148730 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.148764 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.148777 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.148791 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.148801 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.251698 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.251769 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.251793 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.251821 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.251844 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.354927 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.354985 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.355003 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.355025 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.355040 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.458122 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.458187 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.458204 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.458228 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.458244 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.561404 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.561479 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.561500 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.561557 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.561576 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.664345 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.664418 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.664435 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.664461 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.664478 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.767997 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.768088 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.768107 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.768162 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.768184 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.855914 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:12:00 crc kubenswrapper[4954]: E1128 16:12:00.856123 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.871103 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.871165 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.871188 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.871213 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.871230 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.973812 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.973878 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.973905 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.973937 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:00 crc kubenswrapper[4954]: I1128 16:12:00.973965 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:00Z","lastTransitionTime":"2025-11-28T16:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.076615 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.076691 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.076712 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.076741 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.076768 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.179257 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.179321 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.179338 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.179364 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.179381 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.282002 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.282041 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.282050 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.282079 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.282093 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.387023 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.387097 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.387106 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.387119 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.387128 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.489557 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.489592 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.489601 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.489630 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.489639 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.592151 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.592218 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.592242 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.592271 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.592292 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.698894 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.698952 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.698971 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.699007 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.699031 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.802649 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.802745 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.802770 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.802799 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.802820 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.855835 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.855884 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.855928 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm"
Nov 28 16:12:01 crc kubenswrapper[4954]: E1128 16:12:01.856026 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:12:01 crc kubenswrapper[4954]: E1128 16:12:01.856178 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:12:01 crc kubenswrapper[4954]: E1128 16:12:01.856375 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.905243 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.905304 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.905323 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.905345 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:01 crc kubenswrapper[4954]: I1128 16:12:01.905362 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:01Z","lastTransitionTime":"2025-11-28T16:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.008715 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.008799 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.008826 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.008860 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.008880 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.111750 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.111812 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.111832 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.111856 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.111875 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.214928 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.214998 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.215015 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.215039 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.215057 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.299513 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.299626 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.299645 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.299670 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.299690 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:02 crc kubenswrapper[4954]: E1128 16:12:02.317713 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:02Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.323125 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.323183 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.323207 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.323240 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.323261 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:02 crc kubenswrapper[4954]: E1128 16:12:02.348177 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:02Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.353253 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.353312 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.353331 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.353355 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.353373 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:02 crc kubenswrapper[4954]: E1128 16:12:02.373588 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:02Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.379056 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.379117 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.379135 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.379156 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.379173 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:02 crc kubenswrapper[4954]: E1128 16:12:02.398612 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:02Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.404058 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.404118 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.404139 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.404164 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.404186 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:02 crc kubenswrapper[4954]: E1128 16:12:02.422017 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:02Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:02 crc kubenswrapper[4954]: E1128 16:12:02.422161 4954 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.423937 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.423991 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.424004 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.424023 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.424034 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.527269 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.527327 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.527343 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.527368 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.527387 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.630794 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.630878 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.630903 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.630961 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.630980 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.733588 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.733679 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.733696 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.733752 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.733770 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.836593 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.836645 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.836668 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.836701 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.836724 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.855961 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:02 crc kubenswrapper[4954]: E1128 16:12:02.856146 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.939798 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.939874 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.939898 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.939927 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:02 crc kubenswrapper[4954]: I1128 16:12:02.939949 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:02Z","lastTransitionTime":"2025-11-28T16:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.044668 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.044746 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.044771 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.044802 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.044825 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.148058 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.148114 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.148131 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.148293 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.148310 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.251708 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.251778 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.251804 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.251832 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.251852 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.355032 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.355087 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.355104 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.355125 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.355144 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.457849 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.457906 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.457923 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.457947 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.457965 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.561588 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.561653 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.561676 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.561700 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.561717 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.665468 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.665572 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.665592 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.665621 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.665638 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.768617 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.768688 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.768717 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.768749 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.768773 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.856127 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.856221 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:03 crc kubenswrapper[4954]: E1128 16:12:03.856288 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.856307 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:03 crc kubenswrapper[4954]: E1128 16:12:03.856439 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:03 crc kubenswrapper[4954]: E1128 16:12:03.856481 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.871695 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.871746 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.871763 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.871784 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.871804 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.975304 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.975356 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.975367 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.975386 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:03 crc kubenswrapper[4954]: I1128 16:12:03.975400 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:03Z","lastTransitionTime":"2025-11-28T16:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.078663 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.078744 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.078769 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.078800 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.078822 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:04Z","lastTransitionTime":"2025-11-28T16:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.181669 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.181734 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.181752 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.181778 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.181799 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:04Z","lastTransitionTime":"2025-11-28T16:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.284630 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.284735 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.284760 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.284792 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.284815 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:04Z","lastTransitionTime":"2025-11-28T16:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.390505 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.390564 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.390576 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.390593 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.390604 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:04Z","lastTransitionTime":"2025-11-28T16:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.493364 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.493404 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.493412 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.493425 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.493435 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:04Z","lastTransitionTime":"2025-11-28T16:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.596627 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.596663 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.596671 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.596684 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.596693 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:04Z","lastTransitionTime":"2025-11-28T16:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.699346 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.699413 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.699432 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.699457 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.699476 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:04Z","lastTransitionTime":"2025-11-28T16:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.802275 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.802363 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.802381 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.802406 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.802441 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:04Z","lastTransitionTime":"2025-11-28T16:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.840902 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-87jtn_d5ee5420-ed17-4059-8d54-3b486c2ffd1d/kube-multus/0.log" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.840999 4954 generic.go:334] "Generic (PLEG): container finished" podID="d5ee5420-ed17-4059-8d54-3b486c2ffd1d" containerID="2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c" exitCode=1 Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.841072 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-87jtn" event={"ID":"d5ee5420-ed17-4059-8d54-3b486c2ffd1d","Type":"ContainerDied","Data":"2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c"} Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.841677 4954 scope.go:117] "RemoveContainer" containerID="2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.855268 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:04 crc kubenswrapper[4954]: E1128 16:12:04.855461 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.865207 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:04Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.889187 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:04Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.906027 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.906087 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.906111 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.906143 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.906165 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:04Z","lastTransitionTime":"2025-11-28T16:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.908006 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:04Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.927084 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:04Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.950370 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:04Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.964580 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-28T16:12:04Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:04 crc kubenswrapper[4954]: I1128 16:12:04.988726 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resour
ces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:04Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.008960 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.009020 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.009038 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.009064 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.009081 4954 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:05Z","lastTransitionTime":"2025-11-28T16:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.009147 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.028752 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"334428a9-a587-48cd-837c-f2551b88391d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ba1da465ad6f3ae487388090264325459e3799cfad00abae0a0835f2dafb3a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5e399c8bfc6ffc2a7b28afb56ad29ad48bbdb07306055d3e14187688654d94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd4993194ad729e8723b91495cec4774b05f0dea94f91df694ab2f53d2c5def\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.050005 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 
2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.070187 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.092700 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:12:04Z\\\",\\\"message\\\":\\\"2025-11-28T16:11:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c605a6a9-2226-4dcf-afc7-f97c3b483577\\\\n2025-11-28T16:11:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c605a6a9-2226-4dcf-afc7-f97c3b483577 to /host/opt/cni/bin/\\\\n2025-11-28T16:11:19Z [verbose] multus-daemon started\\\\n2025-11-28T16:11:19Z [verbose] Readiness Indicator file check\\\\n2025-11-28T16:12:04Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.111012 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}]
,\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.112415 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.112493 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.112517 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.113361 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.113426 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:05Z","lastTransitionTime":"2025-11-28T16:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.132568 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.151866 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.180503 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f179a2e992a576b91cba6a657f9cdbedf9a8960
3a9b3994f9877ee7b30dcab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:54Z\\\",\\\"message\\\":\\\"er-crc\\\\nI1128 16:11:53.867574 6894 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:53.867379 6894 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-fn6fg in node crc\\\\nI1128 16:11:53.867581 6894 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1128 16:11:53.867590 6894 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-fn6fg after 0 failed attempt(s)\\\\nI1128 16:11:53.867598 6894 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-fn6fg\\\\nI1128 16:11:53.867330 6894 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1128 16:11:53.867615 6894 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:53.867309 6894 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nF1128 16:11:53.867628 6894 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handl\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wrv7t_openshift-ovn-kubernetes(4fb02adc-75ef-4e63-841d-7fa817cc8da2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.196167 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.216636 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.216665 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.216693 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.216711 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.216723 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:05Z","lastTransitionTime":"2025-11-28T16:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.319034 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.319118 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.319136 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.319158 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.319176 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:05Z","lastTransitionTime":"2025-11-28T16:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.422835 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.422887 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.422905 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.422928 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.422947 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:05Z","lastTransitionTime":"2025-11-28T16:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.526113 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.526171 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.526188 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.526212 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.526229 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:05Z","lastTransitionTime":"2025-11-28T16:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.628906 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.628966 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.628984 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.629007 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.629029 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:05Z","lastTransitionTime":"2025-11-28T16:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.732225 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.732281 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.732299 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.732325 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.732343 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:05Z","lastTransitionTime":"2025-11-28T16:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.835688 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.835775 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.835799 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.835831 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.835856 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:05Z","lastTransitionTime":"2025-11-28T16:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.847657 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-87jtn_d5ee5420-ed17-4059-8d54-3b486c2ffd1d/kube-multus/0.log" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.847759 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-87jtn" event={"ID":"d5ee5420-ed17-4059-8d54-3b486c2ffd1d","Type":"ContainerStarted","Data":"47d5b77cc83f542563384a97dff022d8292541a330da7f0f775e7cbcb6ef72dc"} Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.855889 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.855983 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:05 crc kubenswrapper[4954]: E1128 16:12:05.856137 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.856182 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:05 crc kubenswrapper[4954]: E1128 16:12:05.856325 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:05 crc kubenswrapper[4954]: E1128 16:12:05.856442 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.868891 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.888809 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.926356 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f179a2e992a576b91cba6a657f9cdbedf9a8960
3a9b3994f9877ee7b30dcab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:54Z\\\",\\\"message\\\":\\\"er-crc\\\\nI1128 16:11:53.867574 6894 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:53.867379 6894 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-fn6fg in node crc\\\\nI1128 16:11:53.867581 6894 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1128 16:11:53.867590 6894 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-fn6fg after 0 failed attempt(s)\\\\nI1128 16:11:53.867598 6894 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-fn6fg\\\\nI1128 16:11:53.867330 6894 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1128 16:11:53.867615 6894 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:53.867309 6894 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nF1128 16:11:53.867628 6894 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handl\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wrv7t_openshift-ovn-kubernetes(4fb02adc-75ef-4e63-841d-7fa817cc8da2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.938876 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.938945 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.938963 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.938987 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.939005 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:05Z","lastTransitionTime":"2025-11-28T16:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.945964 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.965034 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:05 crc kubenswrapper[4954]: I1128 16:12:05.984396 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:05Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.005613 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.024521 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.040879 4954 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f4
03450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.043635 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.043703 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.043723 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.043750 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.043767 4954 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:06Z","lastTransitionTime":"2025-11-28T16:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.062623 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.079866 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"334428a9-a587-48cd-837c-f2551b88391d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ba1da465ad6f3ae487388090264325459e3799cfad00abae0a0835f2dafb3a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5e399c8bfc6ffc2a7b28afb56ad29ad48bbdb07306055d3e14187688654d94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd4993194ad729e8723b91495cec4774b05f0dea94f91df694ab2f53d2c5def\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.098811 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:06Z is after 
2025-08-24T17:21:41Z" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.116771 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.137261 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47d5b77cc83f542563384a97dff022d8292541a330da7f0f775e7cbcb6ef72dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:12:04Z\\\",\\\"message\\\":\\\"2025-11-28T16:11:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c605a6a9-2226-4dcf-afc7-f97c3b483577\\\\n2025-11-28T16:11:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c605a6a9-2226-4dcf-afc7-f97c3b483577 to /host/opt/cni/bin/\\\\n2025-11-28T16:11:19Z [verbose] multus-daemon started\\\\n2025-11-28T16:11:19Z [verbose] Readiness Indicator file check\\\\n2025-11-28T16:12:04Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.146681 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.146729 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.146746 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.146779 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.146797 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:06Z","lastTransitionTime":"2025-11-28T16:12:06Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.159669 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T16:12:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.175869 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:06Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.193991 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:06Z is after 2025-08-24T17:21:41Z" Nov 28 
16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.249931 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.250007 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.250031 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.250061 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.250085 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:06Z","lastTransitionTime":"2025-11-28T16:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.353705 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.353785 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.353804 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.353831 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.353849 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:06Z","lastTransitionTime":"2025-11-28T16:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.456567 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.456627 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.456644 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.456670 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.456716 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:06Z","lastTransitionTime":"2025-11-28T16:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.559267 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.559334 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.559351 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.559378 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.559400 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:06Z","lastTransitionTime":"2025-11-28T16:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.663814 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.663888 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.663913 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.663944 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.663965 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:06Z","lastTransitionTime":"2025-11-28T16:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.767250 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.767289 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.767297 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.767312 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.767322 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:06Z","lastTransitionTime":"2025-11-28T16:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.855905 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:06 crc kubenswrapper[4954]: E1128 16:12:06.856325 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.871014 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.873324 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.873359 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.873370 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.873387 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.873400 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:06Z","lastTransitionTime":"2025-11-28T16:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.976822 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.977201 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.977221 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.977248 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:06 crc kubenswrapper[4954]: I1128 16:12:06.977268 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:06Z","lastTransitionTime":"2025-11-28T16:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.079644 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.079714 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.079733 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.079760 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.079778 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:07Z","lastTransitionTime":"2025-11-28T16:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.182697 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.182766 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.182778 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.182799 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.182811 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:07Z","lastTransitionTime":"2025-11-28T16:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.286049 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.286130 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.286148 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.286174 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.286193 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:07Z","lastTransitionTime":"2025-11-28T16:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.389450 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.389492 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.389504 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.389545 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.389563 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:07Z","lastTransitionTime":"2025-11-28T16:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.493103 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.493179 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.493203 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.493233 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.493254 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:07Z","lastTransitionTime":"2025-11-28T16:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.596365 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.596462 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.596480 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.596505 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.596545 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:07Z","lastTransitionTime":"2025-11-28T16:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.699203 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.699284 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.699311 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.699342 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.699366 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:07Z","lastTransitionTime":"2025-11-28T16:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.802956 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.803050 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.803067 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.803090 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.803108 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:07Z","lastTransitionTime":"2025-11-28T16:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.855644 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.855737 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.855996 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:07 crc kubenswrapper[4954]: E1128 16:12:07.855979 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:12:07 crc kubenswrapper[4954]: E1128 16:12:07.856143 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:07 crc kubenswrapper[4954]: E1128 16:12:07.856239 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.876778 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.905745 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.905824 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.905850 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.905886 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.905908 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:07Z","lastTransitionTime":"2025-11-28T16:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.907445 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:54Z\\\",\\\"message\\\":\\\"er-crc\\\\nI1128 16:11:53.867574 6894 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:53.867379 6894 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-fn6fg in node crc\\\\nI1128 16:11:53.867581 6894 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1128 16:11:53.867590 6894 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-fn6fg after 0 failed attempt(s)\\\\nI1128 16:11:53.867598 6894 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-fn6fg\\\\nI1128 16:11:53.867330 6894 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1128 16:11:53.867615 6894 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:53.867309 6894 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nF1128 16:11:53.867628 6894 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handl\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wrv7t_openshift-ovn-kubernetes(4fb02adc-75ef-4e63-841d-7fa817cc8da2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.923593 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.935765 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6e88599-a02c-40e5-8854-056e34c39fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b6ba9eaa7660cf70b083a47af130e08fd2635c2b2c2a69a9302bf8284cb3280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b760bb4479062363e578bfb1df320735d51bd18bcf5147e5f09747b09546673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b760bb4479062363e578bfb1df320735d51bd18bcf5147e5f09747b09546673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.948512 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.962111 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.973908 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.986699 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:07 crc kubenswrapper[4954]: I1128 16:12:07.999344 4954 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:07Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.008095 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.008149 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.008168 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.008188 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.008199 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:08Z","lastTransitionTime":"2025-11-28T16:12:08Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.013193 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.025275 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.037761 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"334428a9-a587-48cd-837c-f2551b88391d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ba1da465ad6f3ae487388090264325459e3799cfad00abae0a0835f2dafb3a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5e399c8bfc6ffc2a7b28afb56ad29ad48bbdb07306055d3e14187688654d94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd4993194ad729e8723b91495cec4774b05f0dea94f91df694ab2f53d2c5def\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.053246 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.065768 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.078756 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47d5b77cc83f542563384a97dff022d8292541a330da7f0f775e7cbcb6ef72dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:12:04Z\\\",\\\"message\\\":\\\"2025-11-28T16:11:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c605a6a9-2226-4dcf-afc7-f97c3b483577\\\\n2025-11-28T16:11:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c605a6a9-2226-4dcf-afc7-f97c3b483577 to /host/opt/cni/bin/\\\\n2025-11-28T16:11:19Z [verbose] multus-daemon started\\\\n2025-11-28T16:11:19Z [verbose] Readiness Indicator file check\\\\n2025-11-28T16:12:04Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.095661 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.108423 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:08Z is after 2025-08-24T17:21:41Z" Nov 28 
16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.111379 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.111416 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.111425 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.111439 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.111453 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:08Z","lastTransitionTime":"2025-11-28T16:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.125508 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:08Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.214245 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.214298 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.214316 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.214332 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.214341 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:08Z","lastTransitionTime":"2025-11-28T16:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.317155 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.317249 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.317272 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.317304 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.317328 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:08Z","lastTransitionTime":"2025-11-28T16:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.421785 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.421845 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.421861 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.421888 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.421905 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:08Z","lastTransitionTime":"2025-11-28T16:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.525373 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.525444 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.525463 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.525490 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.525510 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:08Z","lastTransitionTime":"2025-11-28T16:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.628276 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.628344 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.628368 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.628395 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.628414 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:08Z","lastTransitionTime":"2025-11-28T16:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.731487 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.731617 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.731647 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.731681 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.731704 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:08Z","lastTransitionTime":"2025-11-28T16:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.834325 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.834388 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.834407 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.834433 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.834454 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:08Z","lastTransitionTime":"2025-11-28T16:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.855938 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:12:08 crc kubenswrapper[4954]: E1128 16:12:08.856316 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.937306 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.937367 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.937387 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.937411 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:08 crc kubenswrapper[4954]: I1128 16:12:08.937430 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:08Z","lastTransitionTime":"2025-11-28T16:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.040970 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.041047 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.041068 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.041097 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.041114 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:09Z","lastTransitionTime":"2025-11-28T16:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.144510 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.144640 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.144664 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.144696 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.144718 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:09Z","lastTransitionTime":"2025-11-28T16:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.247662 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.247744 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.247772 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.247810 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.247831 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:09Z","lastTransitionTime":"2025-11-28T16:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.350416 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.350494 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.350516 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.350580 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.350605 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:09Z","lastTransitionTime":"2025-11-28T16:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.454958 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.455023 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.455037 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.455055 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.455068 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:09Z","lastTransitionTime":"2025-11-28T16:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.558747 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.558815 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.558835 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.558863 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.558882 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:09Z","lastTransitionTime":"2025-11-28T16:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.662431 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.662492 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.662519 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.662582 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.662606 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:09Z","lastTransitionTime":"2025-11-28T16:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.767073 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.767154 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.767178 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.767209 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.767235 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:09Z","lastTransitionTime":"2025-11-28T16:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.856340 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.856353 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.856489 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm"
Nov 28 16:12:09 crc kubenswrapper[4954]: E1128 16:12:09.856687 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:12:09 crc kubenswrapper[4954]: E1128 16:12:09.857277 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:12:09 crc kubenswrapper[4954]: E1128 16:12:09.857471 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.857584 4954 scope.go:117] "RemoveContainer" containerID="8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8" Nov 28 16:12:09 crc kubenswrapper[4954]: E1128 16:12:09.857774 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-wrv7t_openshift-ovn-kubernetes(4fb02adc-75ef-4e63-841d-7fa817cc8da2)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.870415 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.870438 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.870446 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.870460 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.870470 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:09Z","lastTransitionTime":"2025-11-28T16:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.973716 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.973789 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.973814 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.973845 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:09 crc kubenswrapper[4954]: I1128 16:12:09.973869 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:09Z","lastTransitionTime":"2025-11-28T16:12:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.077518 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.077639 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.077658 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.077683 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.077701 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:10Z","lastTransitionTime":"2025-11-28T16:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.180656 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.180720 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.180739 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.180766 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.180785 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:10Z","lastTransitionTime":"2025-11-28T16:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.283629 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.283697 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.283715 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.283744 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.283779 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:10Z","lastTransitionTime":"2025-11-28T16:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.386281 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.386349 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.386366 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.386390 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.386406 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:10Z","lastTransitionTime":"2025-11-28T16:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.489893 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.489985 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.490010 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.490095 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.490121 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:10Z","lastTransitionTime":"2025-11-28T16:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.594017 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.594088 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.594107 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.594137 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.594159 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:10Z","lastTransitionTime":"2025-11-28T16:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.697813 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.697888 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.697905 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.697930 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.697951 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:10Z","lastTransitionTime":"2025-11-28T16:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.809008 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.809101 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.809122 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.809152 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.809174 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:10Z","lastTransitionTime":"2025-11-28T16:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.855248 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:12:10 crc kubenswrapper[4954]: E1128 16:12:10.855460 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.912388 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.912445 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.912463 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.912490 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:10 crc kubenswrapper[4954]: I1128 16:12:10.912512 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:10Z","lastTransitionTime":"2025-11-28T16:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.015878 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.015934 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.015946 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.015966 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.015981 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:11Z","lastTransitionTime":"2025-11-28T16:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.118519 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.118604 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.118618 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.118639 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.118652 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:11Z","lastTransitionTime":"2025-11-28T16:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.222754 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.222819 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.222839 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.222870 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.222894 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:11Z","lastTransitionTime":"2025-11-28T16:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.327649 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.328069 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.328155 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.328198 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.328248 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:11Z","lastTransitionTime":"2025-11-28T16:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.432075 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.432163 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.432185 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.432217 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.432238 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:11Z","lastTransitionTime":"2025-11-28T16:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.536218 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.536285 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.536301 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.536329 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.536347 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:11Z","lastTransitionTime":"2025-11-28T16:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.639474 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.639518 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.639551 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.639566 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.639576 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:11Z","lastTransitionTime":"2025-11-28T16:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.742346 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.742404 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.742418 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.742437 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.742450 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:11Z","lastTransitionTime":"2025-11-28T16:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.845680 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.845717 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.845727 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.845744 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.845756 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:11Z","lastTransitionTime":"2025-11-28T16:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.856140 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.856140 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm"
Nov 28 16:12:11 crc kubenswrapper[4954]: E1128 16:12:11.856260 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.856268 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:12:11 crc kubenswrapper[4954]: E1128 16:12:11.856329 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623"
Nov 28 16:12:11 crc kubenswrapper[4954]: E1128 16:12:11.856568 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.947740 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.947812 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.947829 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.947852 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:11 crc kubenswrapper[4954]: I1128 16:12:11.947864 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:11Z","lastTransitionTime":"2025-11-28T16:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.050656 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.050709 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.050724 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.050743 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.050758 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:12Z","lastTransitionTime":"2025-11-28T16:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.153869 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.153920 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.153936 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.153961 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.153979 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:12Z","lastTransitionTime":"2025-11-28T16:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.256916 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.257126 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.257154 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.257183 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.257201 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:12Z","lastTransitionTime":"2025-11-28T16:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.360747 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.360834 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.360857 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.360934 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.360961 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:12Z","lastTransitionTime":"2025-11-28T16:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.464178 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.464250 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.464268 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.464287 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.464301 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:12Z","lastTransitionTime":"2025-11-28T16:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.567427 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.567490 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.567513 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.567569 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.567587 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:12Z","lastTransitionTime":"2025-11-28T16:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:12 crc kubenswrapper[4954]: E1128 16:12:12.589047 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:12Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.594567 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.594630 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.594648 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.594672 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.594690 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:12Z","lastTransitionTime":"2025-11-28T16:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:12 crc kubenswrapper[4954]: E1128 16:12:12.616119 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:12Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.622089 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.622456 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.622640 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.622821 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.623016 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:12Z","lastTransitionTime":"2025-11-28T16:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:12 crc kubenswrapper[4954]: E1128 16:12:12.643998 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:12Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.650642 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.650709 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.650733 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.650761 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.650783 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:12Z","lastTransitionTime":"2025-11-28T16:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:12 crc kubenswrapper[4954]: E1128 16:12:12.669074 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:12Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.676520 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.676613 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.676633 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.676660 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.676677 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:12Z","lastTransitionTime":"2025-11-28T16:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:12 crc kubenswrapper[4954]: E1128 16:12:12.698313 4954 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f44fb9d0-93ac-4b47-a932-40af0d9339d2\\\",\\\"systemUUID\\\":\\\"0cbba33b-5b41-4595-ae12-f91c5d706ba3\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:12Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:12 crc kubenswrapper[4954]: E1128 16:12:12.700156 4954 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.702560 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.702622 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.702641 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.702668 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.702685 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:12Z","lastTransitionTime":"2025-11-28T16:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.805548 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.805620 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.805636 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.805662 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.805679 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:12Z","lastTransitionTime":"2025-11-28T16:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.855439 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:12 crc kubenswrapper[4954]: E1128 16:12:12.855756 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.909227 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.909283 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.909301 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.909330 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:12 crc kubenswrapper[4954]: I1128 16:12:12.909348 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:12Z","lastTransitionTime":"2025-11-28T16:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.031495 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.031581 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.031599 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.031623 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.031639 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:13Z","lastTransitionTime":"2025-11-28T16:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.134970 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.135024 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.135041 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.135064 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.135082 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:13Z","lastTransitionTime":"2025-11-28T16:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.239831 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.239904 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.239929 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.239959 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.239980 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:13Z","lastTransitionTime":"2025-11-28T16:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.343426 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.343491 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.343512 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.343566 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.343584 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:13Z","lastTransitionTime":"2025-11-28T16:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.446669 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.446733 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.446754 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.446779 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.446797 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:13Z","lastTransitionTime":"2025-11-28T16:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.549704 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.549762 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.549780 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.549803 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.549820 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:13Z","lastTransitionTime":"2025-11-28T16:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.653190 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.653262 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.653279 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.653305 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.653323 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:13Z","lastTransitionTime":"2025-11-28T16:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.756389 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.756461 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.756482 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.756505 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.756553 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:13Z","lastTransitionTime":"2025-11-28T16:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.855982 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:12:13 crc kubenswrapper[4954]: E1128 16:12:13.856274 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.856747 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.856829 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:13 crc kubenswrapper[4954]: E1128 16:12:13.856920 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:13 crc kubenswrapper[4954]: E1128 16:12:13.857063 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.858667 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.858777 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.858835 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.858908 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.858970 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:13Z","lastTransitionTime":"2025-11-28T16:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.962140 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.962393 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.962473 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.962606 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:13 crc kubenswrapper[4954]: I1128 16:12:13.962728 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:13Z","lastTransitionTime":"2025-11-28T16:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.065694 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.066110 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.066293 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.066516 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.066715 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:14Z","lastTransitionTime":"2025-11-28T16:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.170216 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.170297 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.170313 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.170335 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.170351 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:14Z","lastTransitionTime":"2025-11-28T16:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.274042 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.274110 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.274131 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.274156 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.274174 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:14Z","lastTransitionTime":"2025-11-28T16:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.377810 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.377868 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.377884 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.377910 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.377930 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:14Z","lastTransitionTime":"2025-11-28T16:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.421008 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:14 crc kubenswrapper[4954]: E1128 16:12:14.421232 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:13:18.42119151 +0000 UTC m=+151.812860081 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.421758 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:14 crc kubenswrapper[4954]: E1128 16:12:14.421904 4954 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:12:14 crc kubenswrapper[4954]: E1128 16:12:14.422103 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:13:18.422084418 +0000 UTC m=+151.813752989 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.422019 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:14 crc kubenswrapper[4954]: E1128 16:12:14.422496 4954 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:12:14 crc kubenswrapper[4954]: E1128 16:12:14.422805 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 16:13:18.422772931 +0000 UTC m=+151.814441522 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.480397 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.480441 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.480496 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.480568 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.480597 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:14Z","lastTransitionTime":"2025-11-28T16:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.523631 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.523824 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:14 crc kubenswrapper[4954]: E1128 16:12:14.523945 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:12:14 crc kubenswrapper[4954]: E1128 16:12:14.523991 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:12:14 crc kubenswrapper[4954]: E1128 16:12:14.524011 4954 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:12:14 crc kubenswrapper[4954]: E1128 16:12:14.524111 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 16:13:18.524080466 +0000 UTC m=+151.915749057 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:12:14 crc kubenswrapper[4954]: E1128 16:12:14.524141 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 16:12:14 crc kubenswrapper[4954]: E1128 16:12:14.524187 4954 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 16:12:14 crc kubenswrapper[4954]: E1128 16:12:14.524206 4954 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:12:14 crc kubenswrapper[4954]: E1128 16:12:14.524283 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 16:13:18.524260472 +0000 UTC m=+151.915929043 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.583797 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.584216 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.584377 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.584521 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.584685 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:14Z","lastTransitionTime":"2025-11-28T16:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.687452 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.687555 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.687594 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.687618 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.687636 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:14Z","lastTransitionTime":"2025-11-28T16:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.790914 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.791245 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.791441 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.791709 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.791940 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:14Z","lastTransitionTime":"2025-11-28T16:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.855868 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:14 crc kubenswrapper[4954]: E1128 16:12:14.856022 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.894302 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.894608 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.894829 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.895003 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:14 crc kubenswrapper[4954]: I1128 16:12:14.895124 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:14Z","lastTransitionTime":"2025-11-28T16:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:14.998017 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:14.998066 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:14.998082 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:14.998105 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:14.998124 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:14Z","lastTransitionTime":"2025-11-28T16:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.101073 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.101214 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.101245 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.101271 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.101290 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:15Z","lastTransitionTime":"2025-11-28T16:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.204612 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.204672 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.204695 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.204722 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.204739 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:15Z","lastTransitionTime":"2025-11-28T16:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.308208 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.308287 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.308314 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.308344 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.308366 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:15Z","lastTransitionTime":"2025-11-28T16:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.416962 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.417038 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.417056 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.417082 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.417101 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:15Z","lastTransitionTime":"2025-11-28T16:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.520223 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.520292 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.520309 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.520334 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.520354 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:15Z","lastTransitionTime":"2025-11-28T16:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.623949 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.624033 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.624058 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.624093 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.624115 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:15Z","lastTransitionTime":"2025-11-28T16:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.727000 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.727045 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.727053 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.727068 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.727077 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:15Z","lastTransitionTime":"2025-11-28T16:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.830252 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.830311 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.830329 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.830356 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.830373 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:15Z","lastTransitionTime":"2025-11-28T16:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.855151 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.855200 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.855288 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:12:15 crc kubenswrapper[4954]: E1128 16:12:15.855366 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:15 crc kubenswrapper[4954]: E1128 16:12:15.855474 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:12:15 crc kubenswrapper[4954]: E1128 16:12:15.855692 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.874804 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.932968 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.933018 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.933035 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.933061 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:15 crc kubenswrapper[4954]: I1128 16:12:15.933077 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:15Z","lastTransitionTime":"2025-11-28T16:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.036466 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.036550 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.036562 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.036585 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.036599 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:16Z","lastTransitionTime":"2025-11-28T16:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.139087 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.139194 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.139214 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.139239 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.139262 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:16Z","lastTransitionTime":"2025-11-28T16:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.242414 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.242466 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.242484 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.242508 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.242561 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:16Z","lastTransitionTime":"2025-11-28T16:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.346056 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.346142 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.346167 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.346197 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.346219 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:16Z","lastTransitionTime":"2025-11-28T16:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.449619 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.449658 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.449669 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.449685 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.449697 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:16Z","lastTransitionTime":"2025-11-28T16:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.553023 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.553154 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.553178 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.553207 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.553229 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:16Z","lastTransitionTime":"2025-11-28T16:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.655844 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.655900 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.655920 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.655944 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.655960 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:16Z","lastTransitionTime":"2025-11-28T16:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.759561 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.759662 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.759681 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.759704 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.759720 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:16Z","lastTransitionTime":"2025-11-28T16:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.855502 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:16 crc kubenswrapper[4954]: E1128 16:12:16.855881 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.862437 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.862502 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.862522 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.862575 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.862595 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:16Z","lastTransitionTime":"2025-11-28T16:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.965724 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.965791 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.965812 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.965837 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:16 crc kubenswrapper[4954]: I1128 16:12:16.965877 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:16Z","lastTransitionTime":"2025-11-28T16:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.075803 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.075917 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.075936 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.075964 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.075984 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:17Z","lastTransitionTime":"2025-11-28T16:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.179168 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.179229 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.179247 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.179271 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.179289 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:17Z","lastTransitionTime":"2025-11-28T16:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.282698 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.282758 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.282775 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.282800 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.282820 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:17Z","lastTransitionTime":"2025-11-28T16:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.386416 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.386478 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.386494 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.386519 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.386610 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:17Z","lastTransitionTime":"2025-11-28T16:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.490123 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.490178 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.490195 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.490217 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.490238 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:17Z","lastTransitionTime":"2025-11-28T16:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.593414 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.593467 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.593478 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.593501 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.593514 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:17Z","lastTransitionTime":"2025-11-28T16:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.701502 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.701583 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.701596 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.701617 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.701630 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:17Z","lastTransitionTime":"2025-11-28T16:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.804657 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.804723 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.804741 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.804765 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.804784 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:17Z","lastTransitionTime":"2025-11-28T16:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.856109 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.857871 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:17 crc kubenswrapper[4954]: E1128 16:12:17.858699 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:17 crc kubenswrapper[4954]: E1128 16:12:17.859159 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.859202 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:12:17 crc kubenswrapper[4954]: E1128 16:12:17.860871 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.889605 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df6dadea-b282-4892-a3ed-56a7ebeeb35d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c0811323f5225108fc884caaf8772c68fc0b78c2733c29b7478a0932f5bd3f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e6c4a5f20c5a103ede81b0361533e7ccd1f682523d7b7e6d2d34a0f9fb5e277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://62e6e0f056b85b3f3f1a216913798ac522080ed9a49c8204b457ce927952c706\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-
11-28T16:10:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae11691f00b8caaa9e2a9b9f0cb53a8a083a33a11acbfcc20bebfc0ff2dd5b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9a536c63b5e991b755dcc59c530a9df627798421830320812385f99cd7ea4c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11db6a01b20b7c530007b4dd86193150ebcb6229017cd42f9cae4ed9672fc5e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11db6a01b20b7c530007b4dd86193150ebcb6229017cd42f9cae4ed9672fc5e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b63618fe32efede7129d09e2be2a1160cefb0910ab24ec261649746ff6c653\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5b63618fe32efede7129d09e2be2a1160cefb0
910ab24ec261649746ff6c653\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f0e45ad2911d8ed39bc3add6de7b1b3281e168ebb3651cc4513be11902699fdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0e45ad2911d8ed39bc3add6de7b1b3281e168ebb3651cc4513be11902699fdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.907258 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.907324 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.907346 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.907374 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.907393 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:17Z","lastTransitionTime":"2025-11-28T16:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.907280 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vjmwt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"50663cb6-2935-4019-83d0-1fd0e3100b8d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae8f01f5eefa02ea1a7a2d5f729832fa9efa1946ae4ab3e1fff7edc3181bd805\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xnfdv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:19Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vjmwt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.927807 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.959737 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8f179a2e992a576b91cba6a657f9cdbedf9a8960
3a9b3994f9877ee7b30dcab8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:11:54Z\\\",\\\"message\\\":\\\"er-crc\\\\nI1128 16:11:53.867574 6894 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 16:11:53.867379 6894 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-fn6fg in node crc\\\\nI1128 16:11:53.867581 6894 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1128 16:11:53.867590 6894 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-fn6fg after 0 failed attempt(s)\\\\nI1128 16:11:53.867598 6894 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-fn6fg\\\\nI1128 16:11:53.867330 6894 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1128 16:11:53.867615 6894 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1128 16:11:53.867309 6894 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nF1128 16:11:53.867628 6894 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handl\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wrv7t_openshift-ovn-kubernetes(4fb02adc-75ef-4e63-841d-7fa817cc8da2)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d782w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wrv7t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.977677 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19107b9e-d11e-4360-a6c8-dfc9e96a8623\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:26Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r4fx5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:26Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-mrxnm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:17 crc kubenswrapper[4954]: I1128 16:12:17.996604 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6e88599-a02c-40e5-8854-056e34c39fcc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b6ba9eaa7660cf70b083a47af130e08fd2635c2b2c2a69a9302bf8284cb3280\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4b760bb4479062363e578bfb1df320735d51bd18bcf5147e5f09747b09546673\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b760bb4479062363e578bfb1df320735d51bd18bcf5147e5f09747b09546673\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:17Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.010616 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.010730 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.010754 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.010782 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.010803 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:18Z","lastTransitionTime":"2025-11-28T16:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.018635 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.037717 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:13Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c706a4e2517e59cb22ab12f0e75e43fe4e4f22db559ddb5528556dfa3fc1f525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://226cc161b0368aeeecd594a34ec24d586e46c7d697695e1ef91e46b5798e45cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.056632 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.076390 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92ddd1ce-e1c1-4606-8b8d-066aeba50079\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4662d07889c1b1680c3de5df499184e5f5e6ec629a141607d2cc106d5e3ae291\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-427wm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-jprxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.096879 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-87jtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5ee5420-ed17-4059-8d54-3b486c2ffd1d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47d5b77cc83f542563384a97dff022d8292541a330da7f0f775e7cbcb6ef72dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T16:12:04Z\\\",\\\"message\\\":\\\"2025-11-28T16:11:17+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c605a6a9-2226-4dcf-afc7-f97c3b483577\\\\n2025-11-28T16:11:17+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c605a6a9-2226-4dcf-afc7-f97c3b483577 to /host/opt/cni/bin/\\\\n2025-11-28T16:11:19Z [verbose] multus-daemon started\\\\n2025-11-28T16:11:19Z [verbose] Readiness Indicator file check\\\\n2025-11-28T16:12:04Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2dkp5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-87jtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.114239 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.114300 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.114318 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.114345 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.114363 4954 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:18Z","lastTransitionTime":"2025-11-28T16:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.121090 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ee83a2e-2cf9-42c3-b884-11307ed972a9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://32b7c27e02967a6d6bb8aa2e0ff0b154e943197879b38b7c3849a6b66c8b1f06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fa0cc4d625bf6a1e8b55728bf055bdd6a59d790ca19ba2506c5eefb00b107923\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b364e51a4f814026bc9dfb1a34c6498de523a2e4ebfd41bc0f72ca274608a667\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de86b6510933240c0e56a25262d38d5021ab97dd966a16e0db5515497d7b35b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"co
ntainerID\\\":\\\"cri-o://738adc42046eb587b4349ed2c9903c939d206c440e4a98a6d8ea80c3802a0fbd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c1b691bc4f05a53d7dae632a638e4ba320e2c29eace21c3f33938de95de9ad3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2a57563f0d9cb881f22b0f92be801ee53ada54f91b68a8d6cf6a4432a892766\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:11:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:11:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xklf7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fn6fg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.137058 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-mg7f9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"604274ad-4365-4c81-a94f-9a234d4aa997\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46db06bc4e43bb42a07c1c5c146b77f96052ed0f8c3d5560e30345b3322d9b78\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4b5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:12Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-mg7f9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.159891 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cfded82-34af-4634-9e29-40a7509efd7e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T16:11:10Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 16:11:04.703563 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 16:11:04.704712 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3052568707/tls.crt::/tmp/serving-cert-3052568707/tls.key\\\\\\\"\\\\nI1128 16:11:10.347241 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 16:11:10.350488 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 16:11:10.350590 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 16:11:10.350665 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 16:11:10.350697 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 16:11:10.356245 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 16:11:10.356345 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356368 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 16:11:10.356390 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 16:11:10.356416 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 16:11:10.356437 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 16:11:10.356456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 16:11:10.356683 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 16:11:10.371293 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:52Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.180374 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c5c1c06c-4e5d-48b0-887e-5a238f0d9aee\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb853942bed21ef889e232d9f6784e13560985ce65c523af8a700985a204b1cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2d1bc8a92eae684561a0fa71c356fab06de29b7466482282ccdf466b90825de\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73a2e74c93209e46430bb256e2c0d8097373894437134fcac13240e382c73dd0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.199754 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"334428a9-a587-48cd-837c-f2551b88391d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:10:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ba1da465ad6f3ae487388090264325459e3799cfad00abae0a0835f2dafb3a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca5e399c8bfc6ffc2a7b28afb56ad29ad48bbdb07306055d3e14187688654d94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2bd4993194ad729e8723b91495cec4774b05f0dea94f91df694ab2f53d2c5def\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:10:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://96056d55990fc4f214183531989926e461b2cbdd53bd0923af365ae163e188c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T16:10:50Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T16:10:50Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:10:48Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.217063 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.217138 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.217151 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.217170 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 
16:12:18.217203 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:18Z","lastTransitionTime":"2025-11-28T16:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.223468 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75c63c849abf53ce28ee628c40a45ff8189fa18feddd57fffbd6e9f344cac07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.241904 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:16Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58c42aa54fd271755aa5c8c3d1bb96fd575ac0412c8b45c8daabaaa0d571841c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:18Z is after 2025-08-24T17:21:41Z" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.260073 4954 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bcb37cf-0813-4df2-9f5b-dfab430a96db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T16:11:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://407b6370a15bedec34b52226ccca73c6d1873b4204cc2035106168a4908f02be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f535cb7dd3d283846e7152d8ca8c405a94abef2815187a5d026ba56b269b3299\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T16:11:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54m8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T16:11:25Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-gx7hk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T16:12:18Z is after 2025-08-24T17:21:41Z" Nov 28 
16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.320392 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.320444 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.320463 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.320486 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.320503 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:18Z","lastTransitionTime":"2025-11-28T16:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.423296 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.423363 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.423381 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.423407 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.423427 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:18Z","lastTransitionTime":"2025-11-28T16:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.526519 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.526606 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.526622 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.526648 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.526665 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:18Z","lastTransitionTime":"2025-11-28T16:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.629559 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.629617 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.629634 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.629660 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.629679 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:18Z","lastTransitionTime":"2025-11-28T16:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.733148 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.733515 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.733694 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.733833 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.733995 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:18Z","lastTransitionTime":"2025-11-28T16:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.838448 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.838860 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.839013 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.839146 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.839274 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:18Z","lastTransitionTime":"2025-11-28T16:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.856117 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:18 crc kubenswrapper[4954]: E1128 16:12:18.856492 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.943416 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.943853 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.944035 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.944185 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:18 crc kubenswrapper[4954]: I1128 16:12:18.944352 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:18Z","lastTransitionTime":"2025-11-28T16:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.048678 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.048761 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.048785 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.048818 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.048842 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:19Z","lastTransitionTime":"2025-11-28T16:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.151658 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.152039 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.152210 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.152441 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.152645 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:19Z","lastTransitionTime":"2025-11-28T16:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.256098 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.256178 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.256203 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.256253 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.256281 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:19Z","lastTransitionTime":"2025-11-28T16:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.359099 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.359163 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.359175 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.359194 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.359207 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:19Z","lastTransitionTime":"2025-11-28T16:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.462795 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.462867 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.462886 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.462912 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.462931 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:19Z","lastTransitionTime":"2025-11-28T16:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.566169 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.566220 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.566231 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.566255 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.566269 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:19Z","lastTransitionTime":"2025-11-28T16:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.671557 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.671663 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.671682 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.671706 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.671724 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:19Z","lastTransitionTime":"2025-11-28T16:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.775337 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.775401 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.775419 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.775446 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.775462 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:19Z","lastTransitionTime":"2025-11-28T16:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.855684 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.855782 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.855834 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:12:19 crc kubenswrapper[4954]: E1128 16:12:19.855908 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:12:19 crc kubenswrapper[4954]: E1128 16:12:19.856047 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:19 crc kubenswrapper[4954]: E1128 16:12:19.856195 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.878407 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.878491 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.878511 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.878564 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.878586 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:19Z","lastTransitionTime":"2025-11-28T16:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.981174 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.981233 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.981251 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.981274 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:19 crc kubenswrapper[4954]: I1128 16:12:19.981294 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:19Z","lastTransitionTime":"2025-11-28T16:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.084714 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.084769 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.084786 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.084813 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.084831 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:20Z","lastTransitionTime":"2025-11-28T16:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.187801 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.187880 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.187897 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.188344 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.188400 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:20Z","lastTransitionTime":"2025-11-28T16:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.291376 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.291419 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.291435 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.291456 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.291472 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:20Z","lastTransitionTime":"2025-11-28T16:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.394996 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.395043 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.395060 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.395085 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.395101 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:20Z","lastTransitionTime":"2025-11-28T16:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.498490 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.498596 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.498614 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.498639 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.498659 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:20Z","lastTransitionTime":"2025-11-28T16:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.602227 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.602270 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.602281 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.602297 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.602308 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:20Z","lastTransitionTime":"2025-11-28T16:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.705476 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.705569 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.705592 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.705621 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.705639 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:20Z","lastTransitionTime":"2025-11-28T16:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.808745 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.808796 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.808810 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.808830 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.808844 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:20Z","lastTransitionTime":"2025-11-28T16:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.855286 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:20 crc kubenswrapper[4954]: E1128 16:12:20.855450 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.910896 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.910961 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.910978 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.910994 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:20 crc kubenswrapper[4954]: I1128 16:12:20.911007 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:20Z","lastTransitionTime":"2025-11-28T16:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.013970 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.014032 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.014052 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.014078 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.014098 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:21Z","lastTransitionTime":"2025-11-28T16:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.117699 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.117762 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.117780 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.117803 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.117821 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:21Z","lastTransitionTime":"2025-11-28T16:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.221049 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.221126 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.221149 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.221177 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.221199 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:21Z","lastTransitionTime":"2025-11-28T16:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.325104 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.325188 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.325212 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.325242 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.325265 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:21Z","lastTransitionTime":"2025-11-28T16:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.428473 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.428579 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.428598 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.428624 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.428677 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:21Z","lastTransitionTime":"2025-11-28T16:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.531086 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.531250 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.531273 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.531309 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.531331 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:21Z","lastTransitionTime":"2025-11-28T16:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.634507 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.634596 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.634613 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.634637 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.634653 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:21Z","lastTransitionTime":"2025-11-28T16:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.737614 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.737680 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.737698 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.737723 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.737741 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:21Z","lastTransitionTime":"2025-11-28T16:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.840991 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.841066 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.841084 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.841107 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.841125 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:21Z","lastTransitionTime":"2025-11-28T16:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.855722 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.855791 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.855837 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm"
Nov 28 16:12:21 crc kubenswrapper[4954]: E1128 16:12:21.855910 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
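The setters.go entries above embed the node's Ready condition as a JSON object. A minimal sketch for pulling that payload apart when post-processing a capture like this one; the struct mirrors only the fields visible in the log line and is not the kubelet's own NodeCondition type:

```go
// Unmarshal the condition JSON logged by setters.go above (illustrative only).
package main

import (
	"encoding/json"
	"fmt"
)

type readyCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Payload copied from a "Node became not ready" entry (message shortened).
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:21Z","lastTransitionTime":"2025-11-28T16:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
	var c readyCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s reason=%s\n", c.Type, c.Status, c.Reason) // Ready=False reason=KubeletNotReady
}
```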
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 16:12:21 crc kubenswrapper[4954]: E1128 16:12:21.856046 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623" Nov 28 16:12:21 crc kubenswrapper[4954]: E1128 16:12:21.857072 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.857464 4954 scope.go:117] "RemoveContainer" containerID="8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8" Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.943951 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.944318 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.944336 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.944359 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:21 crc kubenswrapper[4954]: I1128 16:12:21.944378 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:21Z","lastTransitionTime":"2025-11-28T16:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.047464 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.047565 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.047588 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.047618 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.047638 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:22Z","lastTransitionTime":"2025-11-28T16:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.150842 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.150897 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.150913 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.150934 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.150949 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:22Z","lastTransitionTime":"2025-11-28T16:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.254267 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.254312 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.254326 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.254349 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.254364 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:22Z","lastTransitionTime":"2025-11-28T16:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.357125 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.357162 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.357170 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.357368 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.357377 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:22Z","lastTransitionTime":"2025-11-28T16:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.460368 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.460430 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.460447 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.460471 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.460487 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:22Z","lastTransitionTime":"2025-11-28T16:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.563414 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.563464 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.563482 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.563505 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.563522 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:22Z","lastTransitionTime":"2025-11-28T16:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.667090 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.667147 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.667163 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.667185 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.667201 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:22Z","lastTransitionTime":"2025-11-28T16:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.766921 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.766988 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.767005 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.767031 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.767054 4954 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T16:12:22Z","lastTransitionTime":"2025-11-28T16:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 16:12:22 crc kubenswrapper[4954]: I1128 16:12:22.855670 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:22 crc kubenswrapper[4954]: E1128 16:12:22.855858 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.451449 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"] Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.452517 4954 util.go:30] "No sandbox for pod can be found. 
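Every NotReady block in this stretch carries the same root cause string: no CNI configuration file in /etc/kubernetes/cni/net.d/. A minimal sketch of that predicate, assuming the conventional libcni config extensions (.conf, .conflist, .json); this is an illustration of the check the log keeps failing, not the kubelet's actual readiness code:

```go
// Report whether a CNI config directory contains any loadable config file.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func cniConfigPresent(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions libcni conventionally loads
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := cniConfigPresent("/etc/kubernetes/cni/net.d")
	if err != nil {
		fmt.Println("cannot read CNI config dir:", err)
		return
	}
	// Stays false until the network provider (OVN-Kubernetes here) writes its config.
	fmt.Println("NetworkReady:", ok)
}
```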
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.456279 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.456268 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.456285 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.460664 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.521841 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-vjmwt" podStartSLOduration=73.521822918 podStartE2EDuration="1m13.521822918s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:23.520388541 +0000 UTC m=+96.912057112" watchObservedRunningTime="2025-11-28 16:12:23.521822918 +0000 UTC m=+96.913491469"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.522090 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=8.522082586 podStartE2EDuration="8.522082586s" podCreationTimestamp="2025-11-28 16:12:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:23.500408764 +0000 UTC m=+96.892077395" watchObservedRunningTime="2025-11-28 16:12:23.522082586 +0000 UTC m=+96.913751137"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.527897 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dc8c6660-c306-4435-b267-f29dc694cd6d-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-djq47\" (UID: \"dc8c6660-c306-4435-b267-f29dc694cd6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.527967 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/dc8c6660-c306-4435-b267-f29dc694cd6d-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-djq47\" (UID: \"dc8c6660-c306-4435-b267-f29dc694cd6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.528008 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/dc8c6660-c306-4435-b267-f29dc694cd6d-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-djq47\" (UID: \"dc8c6660-c306-4435-b267-f29dc694cd6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.528059 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc8c6660-c306-4435-b267-f29dc694cd6d-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-djq47\" (UID: \"dc8c6660-c306-4435-b267-f29dc694cd6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.528117 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/dc8c6660-c306-4435-b267-f29dc694cd6d-service-ca\") pod \"cluster-version-operator-5c965bbfc6-djq47\" (UID: \"dc8c6660-c306-4435-b267-f29dc694cd6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.621038 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podStartSLOduration=73.621014564 podStartE2EDuration="1m13.621014564s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:23.605391298 +0000 UTC m=+96.997059839" watchObservedRunningTime="2025-11-28 16:12:23.621014564 +0000 UTC m=+97.012683135"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.629733 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dc8c6660-c306-4435-b267-f29dc694cd6d-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-djq47\" (UID: \"dc8c6660-c306-4435-b267-f29dc694cd6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.629791 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/dc8c6660-c306-4435-b267-f29dc694cd6d-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-djq47\" (UID: \"dc8c6660-c306-4435-b267-f29dc694cd6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.629829 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/dc8c6660-c306-4435-b267-f29dc694cd6d-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-djq47\" (UID: \"dc8c6660-c306-4435-b267-f29dc694cd6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.629881 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc8c6660-c306-4435-b267-f29dc694cd6d-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-djq47\" (UID: \"dc8c6660-c306-4435-b267-f29dc694cd6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.629927 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/dc8c6660-c306-4435-b267-f29dc694cd6d-service-ca\") pod \"cluster-version-operator-5c965bbfc6-djq47\" (UID: \"dc8c6660-c306-4435-b267-f29dc694cd6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.629954 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/dc8c6660-c306-4435-b267-f29dc694cd6d-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-djq47\" (UID: \"dc8c6660-c306-4435-b267-f29dc694cd6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.630039 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/dc8c6660-c306-4435-b267-f29dc694cd6d-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-djq47\" (UID: \"dc8c6660-c306-4435-b267-f29dc694cd6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.631508 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/dc8c6660-c306-4435-b267-f29dc694cd6d-service-ca\") pod \"cluster-version-operator-5c965bbfc6-djq47\" (UID: \"dc8c6660-c306-4435-b267-f29dc694cd6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.640361 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=17.640332071 podStartE2EDuration="17.640332071s" podCreationTimestamp="2025-11-28 16:12:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:23.638002905 +0000 UTC m=+97.029671506" watchObservedRunningTime="2025-11-28 16:12:23.640332071 +0000 UTC m=+97.032000652"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.644882 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dc8c6660-c306-4435-b267-f29dc694cd6d-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-djq47\" (UID: \"dc8c6660-c306-4435-b267-f29dc694cd6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.648610 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc8c6660-c306-4435-b267-f29dc694cd6d-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-djq47\" (UID: \"dc8c6660-c306-4435-b267-f29dc694cd6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.753589 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-87jtn" podStartSLOduration=73.753570563 podStartE2EDuration="1m13.753570563s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:23.729283256 +0000 UTC m=+97.120951807" watchObservedRunningTime="2025-11-28 16:12:23.753570563 +0000 UTC m=+97.145239104"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.764146 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-fn6fg" podStartSLOduration=73.764125355 podStartE2EDuration="1m13.764125355s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:23.754225435 +0000 UTC m=+97.145893976" watchObservedRunningTime="2025-11-28 16:12:23.764125355 +0000 UTC m=+97.155793896"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.764990 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-mg7f9" podStartSLOduration=73.764982853 podStartE2EDuration="1m13.764982853s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:23.764063003 +0000 UTC m=+97.155731544" watchObservedRunningTime="2025-11-28 16:12:23.764982853 +0000 UTC m=+97.156651394"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.777900 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=72.777882761 podStartE2EDuration="1m12.777882761s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:23.777361445 +0000 UTC m=+97.169029986" watchObservedRunningTime="2025-11-28 16:12:23.777882761 +0000 UTC m=+97.169551302"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.779585 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47"
Nov 28 16:12:23 crc kubenswrapper[4954]: W1128 16:12:23.794788 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddc8c6660_c306_4435_b267_f29dc694cd6d.slice/crio-5cda955a85d2d33fdd6061ac094e28953c04558bb5e35e0e675d8054da101ce2 WatchSource:0}: Error finding container 5cda955a85d2d33fdd6061ac094e28953c04558bb5e35e0e675d8054da101ce2: Status 404 returned error can't find the container with id 5cda955a85d2d33fdd6061ac094e28953c04558bb5e35e0e675d8054da101ce2
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.806081 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=71.806065375 podStartE2EDuration="1m11.806065375s" podCreationTimestamp="2025-11-28 16:11:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:23.795749981 +0000 UTC m=+97.187418522" watchObservedRunningTime="2025-11-28 16:12:23.806065375 +0000 UTC m=+97.197733916"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.806321 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=44.806316154 podStartE2EDuration="44.806316154s" podCreationTimestamp="2025-11-28 16:11:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:23.805840578 +0000 UTC m=+97.197509119" watchObservedRunningTime="2025-11-28 16:12:23.806316154 +0000 UTC m=+97.197984695"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.834153 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-gx7hk" podStartSLOduration=72.834139646 podStartE2EDuration="1m12.834139646s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:23.833891698 +0000 UTC m=+97.225560249" watchObservedRunningTime="2025-11-28 16:12:23.834139646 +0000 UTC m=+97.225808187"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.855607 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.856827 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm"
Nov 28 16:12:23 crc kubenswrapper[4954]: E1128 16:12:23.856952 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.857239 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:12:23 crc kubenswrapper[4954]: E1128 16:12:23.857563 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:12:23 crc kubenswrapper[4954]: E1128 16:12:23.857819 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623"
Nov 28 16:12:23 crc kubenswrapper[4954]: I1128 16:12:23.924071 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47" event={"ID":"dc8c6660-c306-4435-b267-f29dc694cd6d","Type":"ContainerStarted","Data":"5cda955a85d2d33fdd6061ac094e28953c04558bb5e35e0e675d8054da101ce2"}
Nov 28 16:12:24 crc kubenswrapper[4954]: I1128 16:12:24.855087 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:12:24 crc kubenswrapper[4954]: E1128 16:12:24.855230 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:12:24 crc kubenswrapper[4954]: I1128 16:12:24.932062 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovnkube-controller/2.log"
Nov 28 16:12:24 crc kubenswrapper[4954]: I1128 16:12:24.936486 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerStarted","Data":"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320"}
Nov 28 16:12:24 crc kubenswrapper[4954]: I1128 16:12:24.937237 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t"
Nov 28 16:12:24 crc kubenswrapper[4954]: I1128 16:12:24.938986 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47" event={"ID":"dc8c6660-c306-4435-b267-f29dc694cd6d","Type":"ContainerStarted","Data":"287ee2482372e32eb7390685c45322187bbb0377c3885c9694bda0149d1040d8"}
Nov 28 16:12:25 crc kubenswrapper[4954]: I1128 16:12:25.004550 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-djq47" podStartSLOduration=75.004521262 podStartE2EDuration="1m15.004521262s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:25.004054587 +0000 UTC m=+98.395723148" watchObservedRunningTime="2025-11-28 16:12:25.004521262 +0000 UTC m=+98.396189803"
Nov 28 16:12:25 crc kubenswrapper[4954]: I1128 16:12:25.004936 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" podStartSLOduration=75.004932855 podStartE2EDuration="1m15.004932855s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:24.990321361 +0000 UTC m=+98.381989922" watchObservedRunningTime="2025-11-28 16:12:25.004932855 +0000 UTC m=+98.396601386"
Nov 28 16:12:25 crc kubenswrapper[4954]: I1128 16:12:25.203148 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-mrxnm"]
Nov 28 16:12:25 crc kubenswrapper[4954]: I1128 16:12:25.203274 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm"
Nov 28 16:12:25 crc kubenswrapper[4954]: E1128 16:12:25.203375 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623"
Nov 28 16:12:25 crc kubenswrapper[4954]: I1128 16:12:25.855820 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 16:12:25 crc kubenswrapper[4954]: I1128 16:12:25.855820 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:12:25 crc kubenswrapper[4954]: E1128 16:12:25.856088 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 16:12:25 crc kubenswrapper[4954]: E1128 16:12:25.856298 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 16:12:26 crc kubenswrapper[4954]: I1128 16:12:26.855595 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 16:12:26 crc kubenswrapper[4954]: I1128 16:12:26.855684 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm"
Nov 28 16:12:26 crc kubenswrapper[4954]: E1128 16:12:26.855775 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 16:12:26 crc kubenswrapper[4954]: E1128 16:12:26.855890 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-mrxnm" podUID="19107b9e-d11e-4360-a6c8-dfc9e96a8623"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.858822 4954 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.859423 4954 kubelet_node_status.go:538] "Fast updating node status as it just became ready"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.859436 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.859515 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
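The kubenswrapper records above all use klog's standard header, Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg, where L is the severity (I/W/E/F). A small assumed helper (hypothetical, for post-processing a capture like this) that picks the header apart, e.g. to isolate the single W entry above from the surrounding I/E noise:

```go
// Extract severity, timestamp, and source location from a klog-style record.
package main

import (
	"fmt"
	"regexp"
)

var klogHeader = regexp.MustCompile(`\b([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+) (\d+) ([\w.]+:\d+)\]`)

func main() {
	line := `Nov 28 16:12:23 crc kubenswrapper[4954]: W1128 16:12:23.794788 4954 manager.go:1169] Failed to process watch event`
	if m := klogHeader.FindStringSubmatch(line); m != nil {
		// m[1]=severity, m[2]=mmdd, m[3]=time, m[4]=thread id, m[5]=file:line
		fmt.Printf("severity=%s time=%s source=%s\n", m[1], m[3], m[5])
	}
}
```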
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.862920 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.865119 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.901256 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hvvf8"]
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.901801 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-hvvf8"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.912760 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.913449 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.914926 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.917687 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2ckdw"]
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.918286 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.919305 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.919753 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.919943 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.920366 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.920673 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-sr684"]
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.920690 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.921940 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.921860 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.939994 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.941636 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv"]
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.942051 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.948419 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-qngll"]
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.948960 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.949882 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.950138 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.950453 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.950744 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.951301 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.952760 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.952820 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.953887 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954035 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954073 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954103 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954112 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954162 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954322 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954332 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954375 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954382 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954437 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954466 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954520 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954542 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954587 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954712 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.954757 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.955821 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.956177 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.956681 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.956724 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.965203 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj"]
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.965903 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.974628 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.974988 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.975028 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.975290 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.975391 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.975813 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.975842 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qllgz\" (UniqueName: \"kubernetes.io/projected/76c97c39-16a9-4231-ade3-903f2eb8b4be-kube-api-access-qllgz\") pod \"controller-manager-879f6c89f-2ckdw\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.975893 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39da966e-8c22-4165-81e3-be4df8c3e0d8-serving-cert\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.975933 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2ckdw\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.975964 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6b62bff3-6957-454f-94bc-7a0579e32b2c-audit-policies\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.975992 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6b62bff3-6957-454f-94bc-7a0579e32b2c-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976021 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39da966e-8c22-4165-81e3-be4df8c3e0d8-config\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976065 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/39da966e-8c22-4165-81e3-be4df8c3e0d8-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976099 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/39da966e-8c22-4165-81e3-be4df8c3e0d8-audit\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976098 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976132 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/39da966e-8c22-4165-81e3-be4df8c3e0d8-audit-dir\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976183 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjdvj\" (UniqueName: \"kubernetes.io/projected/72eb201a-f91c-4d66-af4f-2c11a40b52a3-kube-api-access-zjdvj\") pod \"authentication-operator-69f744f599-qngll\" (UID: \"72eb201a-f91c-4d66-af4f-2c11a40b52a3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976288 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1576e0fb-2ebb-475e-abbc-6d4da8aca68c-images\") pod \"machine-api-operator-5694c8668f-sr684\" (UID: \"1576e0fb-2ebb-475e-abbc-6d4da8aca68c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976369 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976365 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/39da966e-8c22-4165-81e3-be4df8c3e0d8-node-pullsecrets\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976425 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b62bff3-6957-454f-94bc-7a0579e32b2c-serving-cert\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976498 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1576e0fb-2ebb-475e-abbc-6d4da8aca68c-config\") pod \"machine-api-operator-5694c8668f-sr684\" (UID: \"1576e0fb-2ebb-475e-abbc-6d4da8aca68c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976554 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqds5\" (UniqueName: \"kubernetes.io/projected/1576e0fb-2ebb-475e-abbc-6d4da8aca68c-kube-api-access-mqds5\") pod \"machine-api-operator-5694c8668f-sr684\" (UID: \"1576e0fb-2ebb-475e-abbc-6d4da8aca68c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976616 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjvvz\" (UniqueName: \"kubernetes.io/projected/6b62bff3-6957-454f-94bc-7a0579e32b2c-kube-api-access-sjvvz\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976677 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6b62bff3-6957-454f-94bc-7a0579e32b2c-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976716 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-config\") pod \"controller-manager-879f6c89f-2ckdw\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976772 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bmm8\" (UniqueName: \"kubernetes.io/projected/39da966e-8c22-4165-81e3-be4df8c3e0d8-kube-api-access-9bmm8\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976815 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-client-ca\") pod \"controller-manager-879f6c89f-2ckdw\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw"
Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976854 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72eb201a-f91c-4d66-af4f-2c11a40b52a3-serving-cert\") pod \"authentication-operator-69f744f599-qngll\" (UID: \"72eb201a-f91c-4d66-af4f-2c11a40b52a3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll"
kubenswrapper[4954]: I1128 16:12:27.976896 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72eb201a-f91c-4d66-af4f-2c11a40b52a3-config\") pod \"authentication-operator-69f744f599-qngll\" (UID: \"72eb201a-f91c-4d66-af4f-2c11a40b52a3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976929 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/39da966e-8c22-4165-81e3-be4df8c3e0d8-encryption-config\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.976959 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/76c97c39-16a9-4231-ade3-903f2eb8b4be-serving-cert\") pod \"controller-manager-879f6c89f-2ckdw\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.977002 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/39da966e-8c22-4165-81e3-be4df8c3e0d8-etcd-serving-ca\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.977034 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6b62bff3-6957-454f-94bc-7a0579e32b2c-encryption-config\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.977067 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/72eb201a-f91c-4d66-af4f-2c11a40b52a3-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-qngll\" (UID: \"72eb201a-f91c-4d66-af4f-2c11a40b52a3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.977108 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/39da966e-8c22-4165-81e3-be4df8c3e0d8-etcd-client\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.977141 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/1576e0fb-2ebb-475e-abbc-6d4da8aca68c-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-sr684\" (UID: \"1576e0fb-2ebb-475e-abbc-6d4da8aca68c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.977190 4954 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/72eb201a-f91c-4d66-af4f-2c11a40b52a3-service-ca-bundle\") pod \"authentication-operator-69f744f599-qngll\" (UID: \"72eb201a-f91c-4d66-af4f-2c11a40b52a3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.977228 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6b62bff3-6957-454f-94bc-7a0579e32b2c-audit-dir\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.977257 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/39da966e-8c22-4165-81e3-be4df8c3e0d8-image-import-ca\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.977288 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6b62bff3-6957-454f-94bc-7a0579e32b2c-etcd-client\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.980013 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m"] Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.980543 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.982578 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.982667 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.982793 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.982834 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.996601 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-sr684"] Nov 28 16:12:27 crc kubenswrapper[4954]: I1128 16:12:27.997480 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.000013 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2ckdw"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.012478 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-d6kn6"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.017340 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.018199 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.018420 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.026232 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-kb4kp"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.036486 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.036875 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-kw5b6"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.037101 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hvvf8"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.037118 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c4ms2"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.037477 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c4ms2" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.037923 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.038030 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.038622 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.039717 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.040218 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.041716 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-ghjjk"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.042172 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-5zg2k"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.042518 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.042641 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.043888 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.044208 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.044550 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.044776 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.045440 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-26cld"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.045869 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-26cld" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.046263 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.054220 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-7g4lh"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.054902 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-w96mt"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.055379 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.055496 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.055560 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-7g4lh" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.056241 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.056804 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.058277 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.059445 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.059736 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.059877 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.060093 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.060201 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.060276 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.060349 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.060422 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.060438 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 16:12:28 
crc kubenswrapper[4954]: I1128 16:12:28.060453 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.060495 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.060593 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.060753 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.060833 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.061267 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.067676 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.068360 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.068843 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.069612 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.069881 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.071315 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.071482 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.071659 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.071937 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.071982 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-qngll"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.071947 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.072244 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.072331 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 
16:12:28.072400 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.072477 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.072621 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.072754 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.072808 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.072876 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.073008 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.073160 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.073267 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.073335 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.074246 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.074552 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.075173 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-kb4kp"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.077679 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.088846 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091075 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjvvz\" (UniqueName: \"kubernetes.io/projected/6b62bff3-6957-454f-94bc-7a0579e32b2c-kube-api-access-sjvvz\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091116 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db28b581-1553-4c72-a11f-fb4c3d67a33f-config\") pod \"route-controller-manager-6576b87f9c-bdctj\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091142 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0a163af1-9710-4335-ada6-08578cb822b0-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-hgqg7\" (UID: \"0a163af1-9710-4335-ada6-08578cb822b0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091162 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/2f899c69-b027-4354-a239-cac0f4f33164-etcd-service-ca\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091202 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091224 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/0a163af1-9710-4335-ada6-08578cb822b0-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-hgqg7\" (UID: \"0a163af1-9710-4335-ada6-08578cb822b0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091245 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6b62bff3-6957-454f-94bc-7a0579e32b2c-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091266 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-config\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091284 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-trusted-ca-bundle\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091308 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9f85396f-69ce-4da8-8b25-2ca4517ff116-trusted-ca\") pod \"console-operator-58897d9998-kb4kp\" (UID: \"9f85396f-69ce-4da8-8b25-2ca4517ff116\") " 
pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091330 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxz6f\" (UniqueName: \"kubernetes.io/projected/2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7-kube-api-access-hxz6f\") pod \"router-default-5444994796-5zg2k\" (UID: \"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7\") " pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091352 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-config\") pod \"controller-manager-879f6c89f-2ckdw\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091373 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7-service-ca-bundle\") pod \"router-default-5444994796-5zg2k\" (UID: \"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7\") " pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091397 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-service-ca\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091413 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091434 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-oauth-serving-cert\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091473 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091498 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bmm8\" (UniqueName: \"kubernetes.io/projected/39da966e-8c22-4165-81e3-be4df8c3e0d8-kube-api-access-9bmm8\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091517 4954 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-oauth-config\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091885 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/529ebb9e-62c6-4708-a8dd-bbe4092d04fb-available-featuregates\") pod \"openshift-config-operator-7777fb866f-hk8pv\" (UID: \"529ebb9e-62c6-4708-a8dd-bbe4092d04fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091908 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwjhh\" (UniqueName: \"kubernetes.io/projected/86f49176-9115-4335-b2b9-2b37af823eca-kube-api-access-xwjhh\") pod \"ingress-operator-5b745b69d9-rfwtj\" (UID: \"86f49176-9115-4335-b2b9-2b37af823eca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.092025 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.092794 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-client-ca\") pod \"controller-manager-879f6c89f-2ckdw\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.093447 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6b62bff3-6957-454f-94bc-7a0579e32b2c-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.094414 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-config\") pod \"controller-manager-879f6c89f-2ckdw\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.091932 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-client-ca\") pod \"controller-manager-879f6c89f-2ckdw\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.095894 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/090c4f71-7e7b-4a55-a85b-a1802f9bc398-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-txr4x\" (UID: \"090c4f71-7e7b-4a55-a85b-a1802f9bc398\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x" Nov 28 
16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.095932 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/35c6d76c-8581-4697-bef1-dca1ba384f43-machine-approver-tls\") pod \"machine-approver-56656f9798-4zkjb\" (UID: \"35c6d76c-8581-4697-bef1-dca1ba384f43\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.095951 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-audit-dir\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.095969 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/86f49176-9115-4335-b2b9-2b37af823eca-trusted-ca\") pod \"ingress-operator-5b745b69d9-rfwtj\" (UID: \"86f49176-9115-4335-b2b9-2b37af823eca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.095992 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096018 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72eb201a-f91c-4d66-af4f-2c11a40b52a3-serving-cert\") pod \"authentication-operator-69f744f599-qngll\" (UID: \"72eb201a-f91c-4d66-af4f-2c11a40b52a3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096038 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096058 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f899c69-b027-4354-a239-cac0f4f33164-config\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096076 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72eb201a-f91c-4d66-af4f-2c11a40b52a3-config\") pod \"authentication-operator-69f744f599-qngll\" (UID: \"72eb201a-f91c-4d66-af4f-2c11a40b52a3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096094 4954 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f85396f-69ce-4da8-8b25-2ca4517ff116-config\") pod \"console-operator-58897d9998-kb4kp\" (UID: \"9f85396f-69ce-4da8-8b25-2ca4517ff116\") " pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096111 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7-metrics-certs\") pod \"router-default-5444994796-5zg2k\" (UID: \"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7\") " pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096130 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/39da966e-8c22-4165-81e3-be4df8c3e0d8-encryption-config\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096145 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/76c97c39-16a9-4231-ade3-903f2eb8b4be-serving-cert\") pod \"controller-manager-879f6c89f-2ckdw\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096163 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-audit-policies\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096181 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096227 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7xbn\" (UniqueName: \"kubernetes.io/projected/283afc15-304c-45e8-b9c6-7975cb98c34a-kube-api-access-d7xbn\") pod \"cluster-samples-operator-665b6dd947-c4ms2\" (UID: \"283afc15-304c-45e8-b9c6-7975cb98c34a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c4ms2" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096248 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/39da966e-8c22-4165-81e3-be4df8c3e0d8-etcd-serving-ca\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096268 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/6b62bff3-6957-454f-94bc-7a0579e32b2c-encryption-config\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096287 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/72eb201a-f91c-4d66-af4f-2c11a40b52a3-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-qngll\" (UID: \"72eb201a-f91c-4d66-af4f-2c11a40b52a3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096306 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f85396f-69ce-4da8-8b25-2ca4517ff116-serving-cert\") pod \"console-operator-58897d9998-kb4kp\" (UID: \"9f85396f-69ce-4da8-8b25-2ca4517ff116\") " pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096327 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db28b581-1553-4c72-a11f-fb4c3d67a33f-serving-cert\") pod \"route-controller-manager-6576b87f9c-bdctj\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096343 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlphn\" (UniqueName: \"kubernetes.io/projected/db28b581-1553-4c72-a11f-fb4c3d67a33f-kube-api-access-nlphn\") pod \"route-controller-manager-6576b87f9c-bdctj\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096362 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09c13a85-d025-45f4-b81d-35ee15d4dc50-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-48l9m\" (UID: \"09c13a85-d025-45f4-b81d-35ee15d4dc50\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096381 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v28wp\" (UniqueName: \"kubernetes.io/projected/9f85396f-69ce-4da8-8b25-2ca4517ff116-kube-api-access-v28wp\") pod \"console-operator-58897d9998-kb4kp\" (UID: \"9f85396f-69ce-4da8-8b25-2ca4517ff116\") " pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096403 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7-stats-auth\") pod \"router-default-5444994796-5zg2k\" (UID: \"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7\") " pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096430 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" 
(UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096459 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096484 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0a163af1-9710-4335-ada6-08578cb822b0-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-hgqg7\" (UID: \"0a163af1-9710-4335-ada6-08578cb822b0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096514 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/1576e0fb-2ebb-475e-abbc-6d4da8aca68c-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-sr684\" (UID: \"1576e0fb-2ebb-475e-abbc-6d4da8aca68c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096742 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/39da966e-8c22-4165-81e3-be4df8c3e0d8-etcd-client\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096769 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bh2p\" (UniqueName: \"kubernetes.io/projected/35c6d76c-8581-4697-bef1-dca1ba384f43-kube-api-access-4bh2p\") pod \"machine-approver-56656f9798-4zkjb\" (UID: \"35c6d76c-8581-4697-bef1-dca1ba384f43\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096793 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd5gc\" (UniqueName: \"kubernetes.io/projected/529ebb9e-62c6-4708-a8dd-bbe4092d04fb-kube-api-access-qd5gc\") pod \"openshift-config-operator-7777fb866f-hk8pv\" (UID: \"529ebb9e-62c6-4708-a8dd-bbe4092d04fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096813 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6b62bff3-6957-454f-94bc-7a0579e32b2c-audit-dir\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096843 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/72eb201a-f91c-4d66-af4f-2c11a40b52a3-service-ca-bundle\") pod \"authentication-operator-69f744f599-qngll\" (UID: \"72eb201a-f91c-4d66-af4f-2c11a40b52a3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096872 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2f899c69-b027-4354-a239-cac0f4f33164-serving-cert\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096891 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckhcv\" (UniqueName: \"kubernetes.io/projected/0a163af1-9710-4335-ada6-08578cb822b0-kube-api-access-ckhcv\") pod \"cluster-image-registry-operator-dc59b4c8b-hgqg7\" (UID: \"0a163af1-9710-4335-ada6-08578cb822b0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096914 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/39da966e-8c22-4165-81e3-be4df8c3e0d8-image-import-ca\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096933 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6b62bff3-6957-454f-94bc-7a0579e32b2c-etcd-client\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096952 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096970 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2f899c69-b027-4354-a239-cac0f4f33164-etcd-client\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.096991 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39da966e-8c22-4165-81e3-be4df8c3e0d8-serving-cert\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097020 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2ckdw\" (UID: 
\"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097054 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qllgz\" (UniqueName: \"kubernetes.io/projected/76c97c39-16a9-4231-ade3-903f2eb8b4be-kube-api-access-qllgz\") pod \"controller-manager-879f6c89f-2ckdw\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097083 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35c6d76c-8581-4697-bef1-dca1ba384f43-config\") pod \"machine-approver-56656f9798-4zkjb\" (UID: \"35c6d76c-8581-4697-bef1-dca1ba384f43\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097105 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69qvm\" (UniqueName: \"kubernetes.io/projected/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-kube-api-access-69qvm\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097126 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/283afc15-304c-45e8-b9c6-7975cb98c34a-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-c4ms2\" (UID: \"283afc15-304c-45e8-b9c6-7975cb98c34a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c4ms2" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097154 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6b62bff3-6957-454f-94bc-7a0579e32b2c-audit-policies\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097178 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-serving-cert\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097204 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6b62bff3-6957-454f-94bc-7a0579e32b2c-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097224 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09c13a85-d025-45f4-b81d-35ee15d4dc50-config\") pod \"openshift-apiserver-operator-796bbdcf4f-48l9m\" (UID: \"09c13a85-d025-45f4-b81d-35ee15d4dc50\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m" Nov 28 16:12:28 crc 
kubenswrapper[4954]: I1128 16:12:28.097244 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39da966e-8c22-4165-81e3-be4df8c3e0d8-config\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097264 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9c9vr\" (UniqueName: \"kubernetes.io/projected/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-kube-api-access-9c9vr\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097296 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/39da966e-8c22-4165-81e3-be4df8c3e0d8-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097319 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097339 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/2f899c69-b027-4354-a239-cac0f4f33164-etcd-ca\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097358 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6jfk\" (UniqueName: \"kubernetes.io/projected/090c4f71-7e7b-4a55-a85b-a1802f9bc398-kube-api-access-v6jfk\") pod \"openshift-controller-manager-operator-756b6f6bc6-txr4x\" (UID: \"090c4f71-7e7b-4a55-a85b-a1802f9bc398\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097383 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/39da966e-8c22-4165-81e3-be4df8c3e0d8-audit\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097402 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097422 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-bwwgq\" (UniqueName: \"kubernetes.io/projected/2f899c69-b027-4354-a239-cac0f4f33164-kube-api-access-bwwgq\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097440 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/39da966e-8c22-4165-81e3-be4df8c3e0d8-audit-dir\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097460 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjdvj\" (UniqueName: \"kubernetes.io/projected/72eb201a-f91c-4d66-af4f-2c11a40b52a3-kube-api-access-zjdvj\") pod \"authentication-operator-69f744f599-qngll\" (UID: \"72eb201a-f91c-4d66-af4f-2c11a40b52a3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097477 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/99fb892f-33e1-4ec2-9a56-60461486f2ab-metrics-tls\") pod \"dns-operator-744455d44c-26cld\" (UID: \"99fb892f-33e1-4ec2-9a56-60461486f2ab\") " pod="openshift-dns-operator/dns-operator-744455d44c-26cld" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097496 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/529ebb9e-62c6-4708-a8dd-bbe4092d04fb-serving-cert\") pod \"openshift-config-operator-7777fb866f-hk8pv\" (UID: \"529ebb9e-62c6-4708-a8dd-bbe4092d04fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097515 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tp2nb\" (UniqueName: \"kubernetes.io/projected/09c13a85-d025-45f4-b81d-35ee15d4dc50-kube-api-access-tp2nb\") pod \"openshift-apiserver-operator-796bbdcf4f-48l9m\" (UID: \"09c13a85-d025-45f4-b81d-35ee15d4dc50\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097570 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/86f49176-9115-4335-b2b9-2b37af823eca-bound-sa-token\") pod \"ingress-operator-5b745b69d9-rfwtj\" (UID: \"86f49176-9115-4335-b2b9-2b37af823eca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097590 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1576e0fb-2ebb-475e-abbc-6d4da8aca68c-images\") pod \"machine-api-operator-5694c8668f-sr684\" (UID: \"1576e0fb-2ebb-475e-abbc-6d4da8aca68c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097609 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: 
\"kubernetes.io/secret/2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7-default-certificate\") pod \"router-default-5444994796-5zg2k\" (UID: \"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7\") " pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097627 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/db28b581-1553-4c72-a11f-fb4c3d67a33f-client-ca\") pod \"route-controller-manager-6576b87f9c-bdctj\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097655 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/39da966e-8c22-4165-81e3-be4df8c3e0d8-node-pullsecrets\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097676 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b62bff3-6957-454f-94bc-7a0579e32b2c-serving-cert\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097694 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tcwc\" (UniqueName: \"kubernetes.io/projected/99fb892f-33e1-4ec2-9a56-60461486f2ab-kube-api-access-7tcwc\") pod \"dns-operator-744455d44c-26cld\" (UID: \"99fb892f-33e1-4ec2-9a56-60461486f2ab\") " pod="openshift-dns-operator/dns-operator-744455d44c-26cld" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097713 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/86f49176-9115-4335-b2b9-2b37af823eca-metrics-tls\") pod \"ingress-operator-5b745b69d9-rfwtj\" (UID: \"86f49176-9115-4335-b2b9-2b37af823eca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097729 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/090c4f71-7e7b-4a55-a85b-a1802f9bc398-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-txr4x\" (UID: \"090c4f71-7e7b-4a55-a85b-a1802f9bc398\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097748 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1576e0fb-2ebb-475e-abbc-6d4da8aca68c-config\") pod \"machine-api-operator-5694c8668f-sr684\" (UID: \"1576e0fb-2ebb-475e-abbc-6d4da8aca68c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097769 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqds5\" (UniqueName: \"kubernetes.io/projected/1576e0fb-2ebb-475e-abbc-6d4da8aca68c-kube-api-access-mqds5\") pod \"machine-api-operator-5694c8668f-sr684\" (UID: 
\"1576e0fb-2ebb-475e-abbc-6d4da8aca68c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.097790 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/35c6d76c-8581-4697-bef1-dca1ba384f43-auth-proxy-config\") pod \"machine-approver-56656f9798-4zkjb\" (UID: \"35c6d76c-8581-4697-bef1-dca1ba384f43\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.098146 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6b62bff3-6957-454f-94bc-7a0579e32b2c-audit-dir\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.099340 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/72eb201a-f91c-4d66-af4f-2c11a40b52a3-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-qngll\" (UID: \"72eb201a-f91c-4d66-af4f-2c11a40b52a3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.104246 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/1576e0fb-2ebb-475e-abbc-6d4da8aca68c-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-sr684\" (UID: \"1576e0fb-2ebb-475e-abbc-6d4da8aca68c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.106606 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/39da966e-8c22-4165-81e3-be4df8c3e0d8-audit-dir\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.106621 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6b62bff3-6957-454f-94bc-7a0579e32b2c-etcd-client\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.106814 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2ckdw\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.107435 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1576e0fb-2ebb-475e-abbc-6d4da8aca68c-images\") pod \"machine-api-operator-5694c8668f-sr684\" (UID: \"1576e0fb-2ebb-475e-abbc-6d4da8aca68c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.107968 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/72eb201a-f91c-4d66-af4f-2c11a40b52a3-service-ca-bundle\") pod \"authentication-operator-69f744f599-qngll\" (UID: \"72eb201a-f91c-4d66-af4f-2c11a40b52a3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.108703 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/39da966e-8c22-4165-81e3-be4df8c3e0d8-image-import-ca\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.109127 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6b62bff3-6957-454f-94bc-7a0579e32b2c-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.109155 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/39da966e-8c22-4165-81e3-be4df8c3e0d8-audit\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.109179 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/39da966e-8c22-4165-81e3-be4df8c3e0d8-node-pullsecrets\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.109854 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.109886 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c4ms2"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.110038 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.113785 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39da966e-8c22-4165-81e3-be4df8c3e0d8-config\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.114637 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1576e0fb-2ebb-475e-abbc-6d4da8aca68c-config\") pod \"machine-api-operator-5694c8668f-sr684\" (UID: \"1576e0fb-2ebb-475e-abbc-6d4da8aca68c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.115215 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/39da966e-8c22-4165-81e3-be4df8c3e0d8-etcd-serving-ca\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc 
kubenswrapper[4954]: I1128 16:12:28.115415 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/39da966e-8c22-4165-81e3-be4df8c3e0d8-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.115438 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.115452 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.115631 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.115896 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.116862 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.118022 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72eb201a-f91c-4d66-af4f-2c11a40b52a3-serving-cert\") pod \"authentication-operator-69f744f599-qngll\" (UID: \"72eb201a-f91c-4d66-af4f-2c11a40b52a3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.119203 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/39da966e-8c22-4165-81e3-be4df8c3e0d8-etcd-client\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.119375 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72eb201a-f91c-4d66-af4f-2c11a40b52a3-config\") pod \"authentication-operator-69f744f599-qngll\" (UID: \"72eb201a-f91c-4d66-af4f-2c11a40b52a3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.120502 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39da966e-8c22-4165-81e3-be4df8c3e0d8-serving-cert\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.120504 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6b62bff3-6957-454f-94bc-7a0579e32b2c-audit-policies\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.121446 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6b62bff3-6957-454f-94bc-7a0579e32b2c-encryption-config\") pod \"apiserver-7bbb656c7d-wlzmv\" 
(UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.121602 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/76c97c39-16a9-4231-ade3-903f2eb8b4be-serving-cert\") pod \"controller-manager-879f6c89f-2ckdw\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.123861 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b62bff3-6957-454f-94bc-7a0579e32b2c-serving-cert\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.127719 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.128003 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-26cld"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.128043 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.130429 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.130781 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.130936 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.131273 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.131332 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.132738 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.133454 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.134041 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-w96mt"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.134188 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.135107 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fvflb"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.138925 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp"] Nov 28 16:12:28 crc 
kubenswrapper[4954]: I1128 16:12:28.141695 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-ghjjk"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.145557 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9ck6"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.145994 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-f45ch"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.135303 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.142151 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.149448 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.149601 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-f45ch" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.135198 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/39da966e-8c22-4165-81e3-be4df8c3e0d8-encryption-config\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.139619 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.146681 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9ck6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.144866 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.150221 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.150433 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.151359 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.151519 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.151835 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.152080 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.152392 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-6xcl5"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.152963 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.153206 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.153387 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-6xcl5" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.153958 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.153997 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-58fhh"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.154966 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-58fhh" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.155630 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.156996 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.157201 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.158502 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.160793 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-wbnjk"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.162056 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-9wjhp"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.162219 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.162378 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-zmmsk"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.162492 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-9wjhp" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.162718 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-2wdfl"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.163094 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-zmmsk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.163143 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-2wdfl" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.163709 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.165684 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.167098 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.177001 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-d6kn6"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.177180 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.178686 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.182736 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fvflb"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.183647 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9ck6"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.184201 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.185086 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.186826 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-f45ch"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.187925 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.189005 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-kw5b6"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.190066 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-sdtpv"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.190989 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-sdtpv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.191225 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.192315 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.193405 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.194512 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.195573 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-7g4lh"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.196806 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-2wdfl"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198382 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198430 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1f4e9b0c-5e6d-4838-8af9-1921e28f4c68-srv-cert\") pod \"olm-operator-6b444d44fb-4zwww\" (UID: \"1f4e9b0c-5e6d-4838-8af9-1921e28f4c68\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198463 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7-metrics-certs\") pod \"router-default-5444994796-5zg2k\" (UID: \"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7\") " pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198492 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198519 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7xbn\" (UniqueName: \"kubernetes.io/projected/283afc15-304c-45e8-b9c6-7975cb98c34a-kube-api-access-d7xbn\") pod \"cluster-samples-operator-665b6dd947-c4ms2\" (UID: \"283afc15-304c-45e8-b9c6-7975cb98c34a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c4ms2" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198566 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/db28b581-1553-4c72-a11f-fb4c3d67a33f-serving-cert\") pod \"route-controller-manager-6576b87f9c-bdctj\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198588 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlphn\" (UniqueName: \"kubernetes.io/projected/db28b581-1553-4c72-a11f-fb4c3d67a33f-kube-api-access-nlphn\") pod \"route-controller-manager-6576b87f9c-bdctj\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198608 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09c13a85-d025-45f4-b81d-35ee15d4dc50-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-48l9m\" (UID: \"09c13a85-d025-45f4-b81d-35ee15d4dc50\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198631 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v28wp\" (UniqueName: \"kubernetes.io/projected/9f85396f-69ce-4da8-8b25-2ca4517ff116-kube-api-access-v28wp\") pod \"console-operator-58897d9998-kb4kp\" (UID: \"9f85396f-69ce-4da8-8b25-2ca4517ff116\") " pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198651 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7-stats-auth\") pod \"router-default-5444994796-5zg2k\" (UID: \"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7\") " pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198675 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/41770d46-8567-4b2f-b782-910f08b0b373-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h7crr\" (UID: \"41770d46-8567-4b2f-b782-910f08b0b373\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198699 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd239891-15f6-4d39-bec1-10bb3318c2ee-webhook-cert\") pod \"packageserver-d55dfcdfc-v4786\" (UID: \"bd239891-15f6-4d39-bec1-10bb3318c2ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198722 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/093dcb08-56c5-475b-b31d-00451f608c85-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-xgj9p\" (UID: \"093dcb08-56c5-475b-b31d-00451f608c85\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198755 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/31b2aea6-ff5a-4742-8821-b51e2075eea6-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-b564n\" (UID: \"31b2aea6-ff5a-4742-8821-b51e2075eea6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198781 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bh2p\" (UniqueName: \"kubernetes.io/projected/35c6d76c-8581-4697-bef1-dca1ba384f43-kube-api-access-4bh2p\") pod \"machine-approver-56656f9798-4zkjb\" (UID: \"35c6d76c-8581-4697-bef1-dca1ba384f43\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198805 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd5gc\" (UniqueName: \"kubernetes.io/projected/529ebb9e-62c6-4708-a8dd-bbe4092d04fb-kube-api-access-qd5gc\") pod \"openshift-config-operator-7777fb866f-hk8pv\" (UID: \"529ebb9e-62c6-4708-a8dd-bbe4092d04fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198828 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/bd239891-15f6-4d39-bec1-10bb3318c2ee-tmpfs\") pod \"packageserver-d55dfcdfc-v4786\" (UID: \"bd239891-15f6-4d39-bec1-10bb3318c2ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198850 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2f899c69-b027-4354-a239-cac0f4f33164-serving-cert\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198874 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198897 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1f4e9b0c-5e6d-4838-8af9-1921e28f4c68-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4zwww\" (UID: \"1f4e9b0c-5e6d-4838-8af9-1921e28f4c68\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198928 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/283afc15-304c-45e8-b9c6-7975cb98c34a-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-c4ms2\" (UID: \"283afc15-304c-45e8-b9c6-7975cb98c34a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c4ms2" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198952 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-serving-cert\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198977 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09c13a85-d025-45f4-b81d-35ee15d4dc50-config\") pod \"openshift-apiserver-operator-796bbdcf4f-48l9m\" (UID: \"09c13a85-d025-45f4-b81d-35ee15d4dc50\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.198999 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31b2aea6-ff5a-4742-8821-b51e2075eea6-config\") pod \"kube-controller-manager-operator-78b949d7b-b564n\" (UID: \"31b2aea6-ff5a-4742-8821-b51e2075eea6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199023 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199046 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199071 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6jfk\" (UniqueName: \"kubernetes.io/projected/090c4f71-7e7b-4a55-a85b-a1802f9bc398-kube-api-access-v6jfk\") pod \"openshift-controller-manager-operator-756b6f6bc6-txr4x\" (UID: \"090c4f71-7e7b-4a55-a85b-a1802f9bc398\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199092 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/529ebb9e-62c6-4708-a8dd-bbe4092d04fb-serving-cert\") pod \"openshift-config-operator-7777fb866f-hk8pv\" (UID: \"529ebb9e-62c6-4708-a8dd-bbe4092d04fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199115 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tp2nb\" (UniqueName: \"kubernetes.io/projected/09c13a85-d025-45f4-b81d-35ee15d4dc50-kube-api-access-tp2nb\") pod \"openshift-apiserver-operator-796bbdcf4f-48l9m\" (UID: \"09c13a85-d025-45f4-b81d-35ee15d4dc50\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199136 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/86f49176-9115-4335-b2b9-2b37af823eca-bound-sa-token\") pod \"ingress-operator-5b745b69d9-rfwtj\" (UID: \"86f49176-9115-4335-b2b9-2b37af823eca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199157 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tcwc\" (UniqueName: \"kubernetes.io/projected/99fb892f-33e1-4ec2-9a56-60461486f2ab-kube-api-access-7tcwc\") pod \"dns-operator-744455d44c-26cld\" (UID: \"99fb892f-33e1-4ec2-9a56-60461486f2ab\") " pod="openshift-dns-operator/dns-operator-744455d44c-26cld" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199178 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/86f49176-9115-4335-b2b9-2b37af823eca-metrics-tls\") pod \"ingress-operator-5b745b69d9-rfwtj\" (UID: \"86f49176-9115-4335-b2b9-2b37af823eca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199211 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db28b581-1553-4c72-a11f-fb4c3d67a33f-config\") pod \"route-controller-manager-6576b87f9c-bdctj\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199233 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/2f899c69-b027-4354-a239-cac0f4f33164-etcd-service-ca\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199256 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghgnk\" (UniqueName: \"kubernetes.io/projected/8f76f36e-152f-4386-bb8b-79a98d8bbfa6-kube-api-access-ghgnk\") pod \"control-plane-machine-set-operator-78cbb6b69f-c9ck6\" (UID: \"8f76f36e-152f-4386-bb8b-79a98d8bbfa6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9ck6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199295 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9f85396f-69ce-4da8-8b25-2ca4517ff116-trusted-ca\") pod \"console-operator-58897d9998-kb4kp\" (UID: \"9f85396f-69ce-4da8-8b25-2ca4517ff116\") " pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199317 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxz6f\" (UniqueName: \"kubernetes.io/projected/2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7-kube-api-access-hxz6f\") pod \"router-default-5444994796-5zg2k\" (UID: \"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7\") " pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199340 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/093dcb08-56c5-475b-b31d-00451f608c85-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-xgj9p\" (UID: 
\"093dcb08-56c5-475b-b31d-00451f608c85\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199363 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-config\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199385 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7-service-ca-bundle\") pod \"router-default-5444994796-5zg2k\" (UID: \"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7\") " pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199407 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-service-ca\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199430 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199443 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199454 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-oauth-serving-cert\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199479 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbftz\" (UniqueName: \"kubernetes.io/projected/bd239891-15f6-4d39-bec1-10bb3318c2ee-kube-api-access-xbftz\") pod \"packageserver-d55dfcdfc-v4786\" (UID: \"bd239891-15f6-4d39-bec1-10bb3318c2ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199506 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vhf4\" (UniqueName: \"kubernetes.io/projected/5aba1d28-543f-4f9b-a25d-ceca24889466-kube-api-access-5vhf4\") pod \"downloads-7954f5f757-7g4lh\" (UID: \"5aba1d28-543f-4f9b-a25d-ceca24889466\") " pod="openshift-console/downloads-7954f5f757-7g4lh" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199609 4954 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199637 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd239891-15f6-4d39-bec1-10bb3318c2ee-apiservice-cert\") pod \"packageserver-d55dfcdfc-v4786\" (UID: \"bd239891-15f6-4d39-bec1-10bb3318c2ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199665 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/35c6d76c-8581-4697-bef1-dca1ba384f43-machine-approver-tls\") pod \"machine-approver-56656f9798-4zkjb\" (UID: \"35c6d76c-8581-4697-bef1-dca1ba384f43\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199689 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-audit-dir\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199713 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/86f49176-9115-4335-b2b9-2b37af823eca-trusted-ca\") pod \"ingress-operator-5b745b69d9-rfwtj\" (UID: \"86f49176-9115-4335-b2b9-2b37af823eca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.199737 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/090c4f71-7e7b-4a55-a85b-a1802f9bc398-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-txr4x\" (UID: \"090c4f71-7e7b-4a55-a85b-a1802f9bc398\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.200899 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.201045 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-9wjhp"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.201083 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-6xcl5"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.201232 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.201680 4954 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7sqd\" (UniqueName: \"kubernetes.io/projected/4807ba73-41fb-433c-8097-3fa9ab5b1495-kube-api-access-p7sqd\") pod \"migrator-59844c95c7-58fhh\" (UID: \"4807ba73-41fb-433c-8097-3fa9ab5b1495\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-58fhh" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.201723 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-service-ca\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.201736 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.201770 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f899c69-b027-4354-a239-cac0f4f33164-config\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.201816 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f85396f-69ce-4da8-8b25-2ca4517ff116-config\") pod \"console-operator-58897d9998-kb4kp\" (UID: \"9f85396f-69ce-4da8-8b25-2ca4517ff116\") " pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.201850 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/093dcb08-56c5-475b-b31d-00451f608c85-config\") pod \"kube-apiserver-operator-766d6c64bb-xgj9p\" (UID: \"093dcb08-56c5-475b-b31d-00451f608c85\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.201964 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-audit-policies\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.201998 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f85396f-69ce-4da8-8b25-2ca4517ff116-serving-cert\") pod \"console-operator-58897d9998-kb4kp\" (UID: \"9f85396f-69ce-4da8-8b25-2ca4517ff116\") " pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202038 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-provider-selection\") pod 
\"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202074 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8sg5r\" (UniqueName: \"kubernetes.io/projected/aaee1061-6c26-48aa-ba83-67ccfbfa67a6-kube-api-access-8sg5r\") pod \"service-ca-operator-777779d784-f45ch\" (UID: \"aaee1061-6c26-48aa-ba83-67ccfbfa67a6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f45ch" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202110 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0a163af1-9710-4335-ada6-08578cb822b0-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-hgqg7\" (UID: \"0a163af1-9710-4335-ada6-08578cb822b0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202143 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kxlp\" (UniqueName: \"kubernetes.io/projected/1f4e9b0c-5e6d-4838-8af9-1921e28f4c68-kube-api-access-8kxlp\") pod \"olm-operator-6b444d44fb-4zwww\" (UID: \"1f4e9b0c-5e6d-4838-8af9-1921e28f4c68\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202179 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202218 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/31b2aea6-ff5a-4742-8821-b51e2075eea6-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-b564n\" (UID: \"31b2aea6-ff5a-4742-8821-b51e2075eea6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202254 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckhcv\" (UniqueName: \"kubernetes.io/projected/0a163af1-9710-4335-ada6-08578cb822b0-kube-api-access-ckhcv\") pod \"cluster-image-registry-operator-dc59b4c8b-hgqg7\" (UID: \"0a163af1-9710-4335-ada6-08578cb822b0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202290 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aaee1061-6c26-48aa-ba83-67ccfbfa67a6-serving-cert\") pod \"service-ca-operator-777779d784-f45ch\" (UID: \"aaee1061-6c26-48aa-ba83-67ccfbfa67a6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f45ch" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202305 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9f85396f-69ce-4da8-8b25-2ca4517ff116-trusted-ca\") pod 
\"console-operator-58897d9998-kb4kp\" (UID: \"9f85396f-69ce-4da8-8b25-2ca4517ff116\") " pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202321 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaee1061-6c26-48aa-ba83-67ccfbfa67a6-config\") pod \"service-ca-operator-777779d784-f45ch\" (UID: \"aaee1061-6c26-48aa-ba83-67ccfbfa67a6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f45ch" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202354 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35c6d76c-8581-4697-bef1-dca1ba384f43-config\") pod \"machine-approver-56656f9798-4zkjb\" (UID: \"35c6d76c-8581-4697-bef1-dca1ba384f43\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202359 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7-service-ca-bundle\") pod \"router-default-5444994796-5zg2k\" (UID: \"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7\") " pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202387 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69qvm\" (UniqueName: \"kubernetes.io/projected/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-kube-api-access-69qvm\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202423 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2f899c69-b027-4354-a239-cac0f4f33164-etcd-client\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202463 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c9vr\" (UniqueName: \"kubernetes.io/projected/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-kube-api-access-9c9vr\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202493 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/2f899c69-b027-4354-a239-cac0f4f33164-etcd-ca\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202564 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwwgq\" (UniqueName: \"kubernetes.io/projected/2f899c69-b027-4354-a239-cac0f4f33164-kube-api-access-bwwgq\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202574 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-oauth-serving-cert\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202614 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/99fb892f-33e1-4ec2-9a56-60461486f2ab-metrics-tls\") pod \"dns-operator-744455d44c-26cld\" (UID: \"99fb892f-33e1-4ec2-9a56-60461486f2ab\") " pod="openshift-dns-operator/dns-operator-744455d44c-26cld" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202654 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7-default-certificate\") pod \"router-default-5444994796-5zg2k\" (UID: \"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7\") " pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202686 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/db28b581-1553-4c72-a11f-fb4c3d67a33f-client-ca\") pod \"route-controller-manager-6576b87f9c-bdctj\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202734 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/090c4f71-7e7b-4a55-a85b-a1802f9bc398-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-txr4x\" (UID: \"090c4f71-7e7b-4a55-a85b-a1802f9bc398\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202766 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/35c6d76c-8581-4697-bef1-dca1ba384f43-auth-proxy-config\") pod \"machine-approver-56656f9798-4zkjb\" (UID: \"35c6d76c-8581-4697-bef1-dca1ba384f43\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202801 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202803 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/8f76f36e-152f-4386-bb8b-79a98d8bbfa6-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-c9ck6\" (UID: \"8f76f36e-152f-4386-bb8b-79a98d8bbfa6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9ck6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202853 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/0a163af1-9710-4335-ada6-08578cb822b0-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-hgqg7\" (UID: \"0a163af1-9710-4335-ada6-08578cb822b0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202881 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202904 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/0a163af1-9710-4335-ada6-08578cb822b0-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-hgqg7\" (UID: \"0a163af1-9710-4335-ada6-08578cb822b0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202945 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-trusted-ca-bundle\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.202977 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttdvc\" (UniqueName: \"kubernetes.io/projected/41770d46-8567-4b2f-b782-910f08b0b373-kube-api-access-ttdvc\") pod \"package-server-manager-789f6589d5-h7crr\" (UID: \"41770d46-8567-4b2f-b782-910f08b0b373\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.203018 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwjhh\" (UniqueName: \"kubernetes.io/projected/86f49176-9115-4335-b2b9-2b37af823eca-kube-api-access-xwjhh\") pod \"ingress-operator-5b745b69d9-rfwtj\" (UID: \"86f49176-9115-4335-b2b9-2b37af823eca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.203050 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-oauth-config\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.203075 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/529ebb9e-62c6-4708-a8dd-bbe4092d04fb-available-featuregates\") pod \"openshift-config-operator-7777fb866f-hk8pv\" (UID: \"529ebb9e-62c6-4708-a8dd-bbe4092d04fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.203248 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-audit-policies\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.204892 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.205362 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-audit-dir\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.205794 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-wbnjk"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.205835 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-58fhh"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.205851 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.205955 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-config\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.206413 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/86f49176-9115-4335-b2b9-2b37af823eca-trusted-ca\") pod \"ingress-operator-5b745b69d9-rfwtj\" (UID: \"86f49176-9115-4335-b2b9-2b37af823eca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.206449 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7-stats-auth\") pod \"router-default-5444994796-5zg2k\" (UID: \"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7\") " pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.207001 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/db28b581-1553-4c72-a11f-fb4c3d67a33f-client-ca\") pod \"route-controller-manager-6576b87f9c-bdctj\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.207175 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/529ebb9e-62c6-4708-a8dd-bbe4092d04fb-available-featuregates\") pod \"openshift-config-operator-7777fb866f-hk8pv\" (UID: 
\"529ebb9e-62c6-4708-a8dd-bbe4092d04fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.207487 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09c13a85-d025-45f4-b81d-35ee15d4dc50-config\") pod \"openshift-apiserver-operator-796bbdcf4f-48l9m\" (UID: \"09c13a85-d025-45f4-b81d-35ee15d4dc50\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.207499 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.207505 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.208014 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f899c69-b027-4354-a239-cac0f4f33164-config\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.208459 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/86f49176-9115-4335-b2b9-2b37af823eca-metrics-tls\") pod \"ingress-operator-5b745b69d9-rfwtj\" (UID: \"86f49176-9115-4335-b2b9-2b37af823eca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.208537 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f85396f-69ce-4da8-8b25-2ca4517ff116-config\") pod \"console-operator-58897d9998-kb4kp\" (UID: \"9f85396f-69ce-4da8-8b25-2ca4517ff116\") " pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.209057 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/283afc15-304c-45e8-b9c6-7975cb98c34a-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-c4ms2\" (UID: \"283afc15-304c-45e8-b9c6-7975cb98c34a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c4ms2" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.209128 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7-metrics-certs\") pod \"router-default-5444994796-5zg2k\" (UID: \"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7\") " pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.209360 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-serving-cert\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc 
kubenswrapper[4954]: I1128 16:12:28.209632 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.209693 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.210031 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2f899c69-b027-4354-a239-cac0f4f33164-serving-cert\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.210133 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-trusted-ca-bundle\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.210266 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.210298 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.210303 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db28b581-1553-4c72-a11f-fb4c3d67a33f-config\") pod \"route-controller-manager-6576b87f9c-bdctj\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.210420 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/529ebb9e-62c6-4708-a8dd-bbe4092d04fb-serving-cert\") pod \"openshift-config-operator-7777fb866f-hk8pv\" (UID: \"529ebb9e-62c6-4708-a8dd-bbe4092d04fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.210971 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f85396f-69ce-4da8-8b25-2ca4517ff116-serving-cert\") pod \"console-operator-58897d9998-kb4kp\" (UID: \"9f85396f-69ce-4da8-8b25-2ca4517ff116\") " pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.211178 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.211237 4954 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.211431 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.212283 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-sdtpv"] Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.212303 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.213010 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db28b581-1553-4c72-a11f-fb4c3d67a33f-serving-cert\") pod \"route-controller-manager-6576b87f9c-bdctj\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.213934 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-oauth-config\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.216587 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/35c6d76c-8581-4697-bef1-dca1ba384f43-auth-proxy-config\") pod \"machine-approver-56656f9798-4zkjb\" (UID: \"35c6d76c-8581-4697-bef1-dca1ba384f43\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.215948 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35c6d76c-8581-4697-bef1-dca1ba384f43-config\") pod \"machine-approver-56656f9798-4zkjb\" (UID: \"35c6d76c-8581-4697-bef1-dca1ba384f43\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.216759 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09c13a85-d025-45f4-b81d-35ee15d4dc50-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-48l9m\" (UID: \"09c13a85-d025-45f4-b81d-35ee15d4dc50\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.217749 4954 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2f899c69-b027-4354-a239-cac0f4f33164-etcd-client\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.218301 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7-default-certificate\") pod \"router-default-5444994796-5zg2k\" (UID: \"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7\") " pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.218787 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/35c6d76c-8581-4697-bef1-dca1ba384f43-machine-approver-tls\") pod \"machine-approver-56656f9798-4zkjb\" (UID: \"35c6d76c-8581-4697-bef1-dca1ba384f43\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.220205 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.223311 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.227053 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/2f899c69-b027-4354-a239-cac0f4f33164-etcd-ca\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.245000 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.250456 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/2f899c69-b027-4354-a239-cac0f4f33164-etcd-service-ca\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.264242 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.283850 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.303985 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1f4e9b0c-5e6d-4838-8af9-1921e28f4c68-srv-cert\") pod \"olm-operator-6b444d44fb-4zwww\" (UID: \"1f4e9b0c-5e6d-4838-8af9-1921e28f4c68\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.304152 
4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/41770d46-8567-4b2f-b782-910f08b0b373-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h7crr\" (UID: \"41770d46-8567-4b2f-b782-910f08b0b373\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.304230 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd239891-15f6-4d39-bec1-10bb3318c2ee-webhook-cert\") pod \"packageserver-d55dfcdfc-v4786\" (UID: \"bd239891-15f6-4d39-bec1-10bb3318c2ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.303999 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.304356 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/093dcb08-56c5-475b-b31d-00451f608c85-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-xgj9p\" (UID: \"093dcb08-56c5-475b-b31d-00451f608c85\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.304435 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/bd239891-15f6-4d39-bec1-10bb3318c2ee-tmpfs\") pod \"packageserver-d55dfcdfc-v4786\" (UID: \"bd239891-15f6-4d39-bec1-10bb3318c2ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.306567 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/31b2aea6-ff5a-4742-8821-b51e2075eea6-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-b564n\" (UID: \"31b2aea6-ff5a-4742-8821-b51e2075eea6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.306668 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1f4e9b0c-5e6d-4838-8af9-1921e28f4c68-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4zwww\" (UID: \"1f4e9b0c-5e6d-4838-8af9-1921e28f4c68\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.306745 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31b2aea6-ff5a-4742-8821-b51e2075eea6-config\") pod \"kube-controller-manager-operator-78b949d7b-b564n\" (UID: \"31b2aea6-ff5a-4742-8821-b51e2075eea6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.306852 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghgnk\" (UniqueName: \"kubernetes.io/projected/8f76f36e-152f-4386-bb8b-79a98d8bbfa6-kube-api-access-ghgnk\") pod \"control-plane-machine-set-operator-78cbb6b69f-c9ck6\" (UID: 
\"8f76f36e-152f-4386-bb8b-79a98d8bbfa6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9ck6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.306926 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/093dcb08-56c5-475b-b31d-00451f608c85-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-xgj9p\" (UID: \"093dcb08-56c5-475b-b31d-00451f608c85\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.306998 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbftz\" (UniqueName: \"kubernetes.io/projected/bd239891-15f6-4d39-bec1-10bb3318c2ee-kube-api-access-xbftz\") pod \"packageserver-d55dfcdfc-v4786\" (UID: \"bd239891-15f6-4d39-bec1-10bb3318c2ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.307081 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vhf4\" (UniqueName: \"kubernetes.io/projected/5aba1d28-543f-4f9b-a25d-ceca24889466-kube-api-access-5vhf4\") pod \"downloads-7954f5f757-7g4lh\" (UID: \"5aba1d28-543f-4f9b-a25d-ceca24889466\") " pod="openshift-console/downloads-7954f5f757-7g4lh" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.307148 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd239891-15f6-4d39-bec1-10bb3318c2ee-apiservice-cert\") pod \"packageserver-d55dfcdfc-v4786\" (UID: \"bd239891-15f6-4d39-bec1-10bb3318c2ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.307229 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7sqd\" (UniqueName: \"kubernetes.io/projected/4807ba73-41fb-433c-8097-3fa9ab5b1495-kube-api-access-p7sqd\") pod \"migrator-59844c95c7-58fhh\" (UID: \"4807ba73-41fb-433c-8097-3fa9ab5b1495\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-58fhh" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.307304 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/093dcb08-56c5-475b-b31d-00451f608c85-config\") pod \"kube-apiserver-operator-766d6c64bb-xgj9p\" (UID: \"093dcb08-56c5-475b-b31d-00451f608c85\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.307376 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8sg5r\" (UniqueName: \"kubernetes.io/projected/aaee1061-6c26-48aa-ba83-67ccfbfa67a6-kube-api-access-8sg5r\") pod \"service-ca-operator-777779d784-f45ch\" (UID: \"aaee1061-6c26-48aa-ba83-67ccfbfa67a6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f45ch" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.307448 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kxlp\" (UniqueName: \"kubernetes.io/projected/1f4e9b0c-5e6d-4838-8af9-1921e28f4c68-kube-api-access-8kxlp\") pod \"olm-operator-6b444d44fb-4zwww\" (UID: \"1f4e9b0c-5e6d-4838-8af9-1921e28f4c68\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" Nov 28 16:12:28 
crc kubenswrapper[4954]: I1128 16:12:28.305035 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/bd239891-15f6-4d39-bec1-10bb3318c2ee-tmpfs\") pod \"packageserver-d55dfcdfc-v4786\" (UID: \"bd239891-15f6-4d39-bec1-10bb3318c2ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.307619 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/31b2aea6-ff5a-4742-8821-b51e2075eea6-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-b564n\" (UID: \"31b2aea6-ff5a-4742-8821-b51e2075eea6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.307693 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aaee1061-6c26-48aa-ba83-67ccfbfa67a6-serving-cert\") pod \"service-ca-operator-777779d784-f45ch\" (UID: \"aaee1061-6c26-48aa-ba83-67ccfbfa67a6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f45ch" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.307712 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaee1061-6c26-48aa-ba83-67ccfbfa67a6-config\") pod \"service-ca-operator-777779d784-f45ch\" (UID: \"aaee1061-6c26-48aa-ba83-67ccfbfa67a6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f45ch" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.307807 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/8f76f36e-152f-4386-bb8b-79a98d8bbfa6-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-c9ck6\" (UID: \"8f76f36e-152f-4386-bb8b-79a98d8bbfa6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9ck6" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.307873 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttdvc\" (UniqueName: \"kubernetes.io/projected/41770d46-8567-4b2f-b782-910f08b0b373-kube-api-access-ttdvc\") pod \"package-server-manager-789f6589d5-h7crr\" (UID: \"41770d46-8567-4b2f-b782-910f08b0b373\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.333621 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.339878 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0a163af1-9710-4335-ada6-08578cb822b0-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-hgqg7\" (UID: \"0a163af1-9710-4335-ada6-08578cb822b0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.343985 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.364906 4954 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.371920 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/090c4f71-7e7b-4a55-a85b-a1802f9bc398-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-txr4x\" (UID: \"090c4f71-7e7b-4a55-a85b-a1802f9bc398\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.384191 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.387158 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/090c4f71-7e7b-4a55-a85b-a1802f9bc398-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-txr4x\" (UID: \"090c4f71-7e7b-4a55-a85b-a1802f9bc398\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.406189 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.425165 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.444108 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.452210 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/0a163af1-9710-4335-ada6-08578cb822b0-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-hgqg7\" (UID: \"0a163af1-9710-4335-ada6-08578cb822b0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.464311 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.483675 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.503810 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.510742 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/99fb892f-33e1-4ec2-9a56-60461486f2ab-metrics-tls\") pod \"dns-operator-744455d44c-26cld\" (UID: \"99fb892f-33e1-4ec2-9a56-60461486f2ab\") " pod="openshift-dns-operator/dns-operator-744455d44c-26cld" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.524091 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.543970 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 28 
16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.563855 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.584386 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.604302 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.629403 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.645361 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.665816 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.679768 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/093dcb08-56c5-475b-b31d-00451f608c85-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-xgj9p\" (UID: \"093dcb08-56c5-475b-b31d-00451f608c85\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.684379 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.688757 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/093dcb08-56c5-475b-b31d-00451f608c85-config\") pod \"kube-apiserver-operator-766d6c64bb-xgj9p\" (UID: \"093dcb08-56c5-475b-b31d-00451f608c85\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.753831 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjvvz\" (UniqueName: \"kubernetes.io/projected/6b62bff3-6957-454f-94bc-7a0579e32b2c-kube-api-access-sjvvz\") pod \"apiserver-7bbb656c7d-wlzmv\" (UID: \"6b62bff3-6957-454f-94bc-7a0579e32b2c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.769751 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bmm8\" (UniqueName: \"kubernetes.io/projected/39da966e-8c22-4165-81e3-be4df8c3e0d8-kube-api-access-9bmm8\") pod \"apiserver-76f77b778f-hvvf8\" (UID: \"39da966e-8c22-4165-81e3-be4df8c3e0d8\") " pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.797446 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qllgz\" (UniqueName: \"kubernetes.io/projected/76c97c39-16a9-4231-ade3-903f2eb8b4be-kube-api-access-qllgz\") pod \"controller-manager-879f6c89f-2ckdw\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.807104 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-zjdvj\" (UniqueName: \"kubernetes.io/projected/72eb201a-f91c-4d66-af4f-2c11a40b52a3-kube-api-access-zjdvj\") pod \"authentication-operator-69f744f599-qngll\" (UID: \"72eb201a-f91c-4d66-af4f-2c11a40b52a3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.820718 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.825316 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.826306 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqds5\" (UniqueName: \"kubernetes.io/projected/1576e0fb-2ebb-475e-abbc-6d4da8aca68c-kube-api-access-mqds5\") pod \"machine-api-operator-5694c8668f-sr684\" (UID: \"1576e0fb-2ebb-475e-abbc-6d4da8aca68c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.844131 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.855110 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.855120 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.864447 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.867070 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.869549 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.885204 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.900975 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.905069 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.914487 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.927427 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.945999 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.965903 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 28 16:12:28 crc kubenswrapper[4954]: I1128 16:12:28.987305 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.004931 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.024668 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.044563 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.052421 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aaee1061-6c26-48aa-ba83-67ccfbfa67a6-serving-cert\") pod \"service-ca-operator-777779d784-f45ch\" (UID: \"aaee1061-6c26-48aa-ba83-67ccfbfa67a6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f45ch" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.064425 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.084800 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.090952 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaee1061-6c26-48aa-ba83-67ccfbfa67a6-config\") pod \"service-ca-operator-777779d784-f45ch\" (UID: \"aaee1061-6c26-48aa-ba83-67ccfbfa67a6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f45ch" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.103681 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.112423 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/8f76f36e-152f-4386-bb8b-79a98d8bbfa6-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-c9ck6\" (UID: \"8f76f36e-152f-4386-bb8b-79a98d8bbfa6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9ck6" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.124547 4954 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.144666 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.163129 4954 request.go:700] Waited for 1.013170257s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/secrets?fieldSelector=metadata.name%3Dmarketplace-operator-metrics&limit=500&resourceVersion=0 Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.165476 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.186446 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.212715 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.223723 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.225513 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-qngll"] Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.230145 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv"] Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.244453 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.248734 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd239891-15f6-4d39-bec1-10bb3318c2ee-webhook-cert\") pod \"packageserver-d55dfcdfc-v4786\" (UID: \"bd239891-15f6-4d39-bec1-10bb3318c2ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.251019 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd239891-15f6-4d39-bec1-10bb3318c2ee-apiservice-cert\") pod \"packageserver-d55dfcdfc-v4786\" (UID: \"bd239891-15f6-4d39-bec1-10bb3318c2ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.264267 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.287889 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.304996 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hvvf8"] Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.305199 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 28 16:12:29 crc 
Nov 28 16:12:29 crc kubenswrapper[4954]: E1128 16:12:29.305361 4954 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 28 16:12:29 crc kubenswrapper[4954]: E1128 16:12:29.305407 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1f4e9b0c-5e6d-4838-8af9-1921e28f4c68-srv-cert podName:1f4e9b0c-5e6d-4838-8af9-1921e28f4c68 nodeName:}" failed. No retries permitted until 2025-11-28 16:12:29.805391351 +0000 UTC m=+103.197059892 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/1f4e9b0c-5e6d-4838-8af9-1921e28f4c68-srv-cert") pod "olm-operator-6b444d44fb-4zwww" (UID: "1f4e9b0c-5e6d-4838-8af9-1921e28f4c68") : failed to sync secret cache: timed out waiting for the condition
Nov 28 16:12:29 crc kubenswrapper[4954]: E1128 16:12:29.305549 4954 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 28 16:12:29 crc kubenswrapper[4954]: E1128 16:12:29.305578 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/41770d46-8567-4b2f-b782-910f08b0b373-package-server-manager-serving-cert podName:41770d46-8567-4b2f-b782-910f08b0b373 nodeName:}" failed. No retries permitted until 2025-11-28 16:12:29.805570547 +0000 UTC m=+103.197239088 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/41770d46-8567-4b2f-b782-910f08b0b373-package-server-manager-serving-cert") pod "package-server-manager-789f6589d5-h7crr" (UID: "41770d46-8567-4b2f-b782-910f08b0b373") : failed to sync secret cache: timed out waiting for the condition
Nov 28 16:12:29 crc kubenswrapper[4954]: E1128 16:12:29.307725 4954 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: failed to sync secret cache: timed out waiting for the condition
Nov 28 16:12:29 crc kubenswrapper[4954]: E1128 16:12:29.307837 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1f4e9b0c-5e6d-4838-8af9-1921e28f4c68-profile-collector-cert podName:1f4e9b0c-5e6d-4838-8af9-1921e28f4c68 nodeName:}" failed. No retries permitted until 2025-11-28 16:12:29.80781088 +0000 UTC m=+103.199479461 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/1f4e9b0c-5e6d-4838-8af9-1921e28f4c68-profile-collector-cert") pod "olm-operator-6b444d44fb-4zwww" (UID: "1f4e9b0c-5e6d-4838-8af9-1921e28f4c68") : failed to sync secret cache: timed out waiting for the condition
Nov 28 16:12:29 crc kubenswrapper[4954]: E1128 16:12:29.307745 4954 configmap.go:193] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: failed to sync configmap cache: timed out waiting for the condition
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/31b2aea6-ff5a-4742-8821-b51e2075eea6-config") pod "kube-controller-manager-operator-78b949d7b-b564n" (UID: "31b2aea6-ff5a-4742-8821-b51e2075eea6") : failed to sync configmap cache: timed out waiting for the condition Nov 28 16:12:29 crc kubenswrapper[4954]: E1128 16:12:29.308830 4954 secret.go:188] Couldn't get secret openshift-kube-controller-manager-operator/kube-controller-manager-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 28 16:12:29 crc kubenswrapper[4954]: E1128 16:12:29.308897 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/31b2aea6-ff5a-4742-8821-b51e2075eea6-serving-cert podName:31b2aea6-ff5a-4742-8821-b51e2075eea6 nodeName:}" failed. No retries permitted until 2025-11-28 16:12:29.808879665 +0000 UTC m=+103.200548206 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/31b2aea6-ff5a-4742-8821-b51e2075eea6-serving-cert") pod "kube-controller-manager-operator-78b949d7b-b564n" (UID: "31b2aea6-ff5a-4742-8821-b51e2075eea6") : failed to sync secret cache: timed out waiting for the condition Nov 28 16:12:29 crc kubenswrapper[4954]: W1128 16:12:29.316780 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39da966e_8c22_4165_81e3_be4df8c3e0d8.slice/crio-59629b0c591bf16c88aaf8f6a8d54939f69ab7993188f9c9cc31e87eae70783b WatchSource:0}: Error finding container 59629b0c591bf16c88aaf8f6a8d54939f69ab7993188f9c9cc31e87eae70783b: Status 404 returned error can't find the container with id 59629b0c591bf16c88aaf8f6a8d54939f69ab7993188f9c9cc31e87eae70783b Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.324053 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.342649 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-sr684"] Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.343790 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.353868 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2ckdw"] Nov 28 16:12:29 crc kubenswrapper[4954]: W1128 16:12:29.354083 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1576e0fb_2ebb_475e_abbc_6d4da8aca68c.slice/crio-e379da794a3648e5b32b2d501007cfe98d5da3851ba950c4d53624a566dd43ea WatchSource:0}: Error finding container e379da794a3648e5b32b2d501007cfe98d5da3851ba950c4d53624a566dd43ea: Status 404 returned error can't find the container with id e379da794a3648e5b32b2d501007cfe98d5da3851ba950c4d53624a566dd43ea Nov 28 16:12:29 crc kubenswrapper[4954]: W1128 16:12:29.364390 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod76c97c39_16a9_4231_ade3_903f2eb8b4be.slice/crio-35b701abcbc31d4e57bfce215793cd6b17204e941b68e028a8e2869ba40423d2 WatchSource:0}: Error finding container 35b701abcbc31d4e57bfce215793cd6b17204e941b68e028a8e2869ba40423d2: Status 404 returned error can't find the 
Nov 28 16:12:29 crc kubenswrapper[4954]: W1128 16:12:29.364390 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod76c97c39_16a9_4231_ade3_903f2eb8b4be.slice/crio-35b701abcbc31d4e57bfce215793cd6b17204e941b68e028a8e2869ba40423d2 WatchSource:0}: Error finding container 35b701abcbc31d4e57bfce215793cd6b17204e941b68e028a8e2869ba40423d2: Status 404 returned error can't find the container with id 35b701abcbc31d4e57bfce215793cd6b17204e941b68e028a8e2869ba40423d2
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.364473 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.383905 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.404149 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.425079 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.444005 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.463896 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.484709 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.503882 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.524167 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.546867 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.564935 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.584129 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.604757 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.623881 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.644359 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.664571 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.704867 4954 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.725235 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.746024 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.765939 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.785418 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.804395 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.824315 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.834102 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31b2aea6-ff5a-4742-8821-b51e2075eea6-config\") pod \"kube-controller-manager-operator-78b949d7b-b564n\" (UID: \"31b2aea6-ff5a-4742-8821-b51e2075eea6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.834208 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/31b2aea6-ff5a-4742-8821-b51e2075eea6-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-b564n\" (UID: \"31b2aea6-ff5a-4742-8821-b51e2075eea6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.834311 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1f4e9b0c-5e6d-4838-8af9-1921e28f4c68-srv-cert\") pod \"olm-operator-6b444d44fb-4zwww\" (UID: \"1f4e9b0c-5e6d-4838-8af9-1921e28f4c68\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.834369 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/41770d46-8567-4b2f-b782-910f08b0b373-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h7crr\" (UID: \"41770d46-8567-4b2f-b782-910f08b0b373\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr"
Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.834414 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1f4e9b0c-5e6d-4838-8af9-1921e28f4c68-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4zwww\" (UID: \"1f4e9b0c-5e6d-4838-8af9-1921e28f4c68\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww"
\"kube-controller-manager-operator-78b949d7b-b564n\" (UID: \"31b2aea6-ff5a-4742-8821-b51e2075eea6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.838626 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/31b2aea6-ff5a-4742-8821-b51e2075eea6-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-b564n\" (UID: \"31b2aea6-ff5a-4742-8821-b51e2075eea6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.838779 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/41770d46-8567-4b2f-b782-910f08b0b373-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h7crr\" (UID: \"41770d46-8567-4b2f-b782-910f08b0b373\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.839265 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1f4e9b0c-5e6d-4838-8af9-1921e28f4c68-profile-collector-cert\") pod \"olm-operator-6b444d44fb-4zwww\" (UID: \"1f4e9b0c-5e6d-4838-8af9-1921e28f4c68\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.840269 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1f4e9b0c-5e6d-4838-8af9-1921e28f4c68-srv-cert\") pod \"olm-operator-6b444d44fb-4zwww\" (UID: \"1f4e9b0c-5e6d-4838-8af9-1921e28f4c68\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.848075 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.864924 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.890129 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.903763 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.924378 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.944333 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.964353 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.974122 4954 generic.go:334] "Generic (PLEG): container finished" podID="39da966e-8c22-4165-81e3-be4df8c3e0d8" containerID="fd7e67429f64e786fe72b989c326f92ecabab0fcd5657c430d5b6aeddf5eccf5" exitCode=0 Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.974190 
4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" event={"ID":"39da966e-8c22-4165-81e3-be4df8c3e0d8","Type":"ContainerDied","Data":"fd7e67429f64e786fe72b989c326f92ecabab0fcd5657c430d5b6aeddf5eccf5"} Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.974221 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" event={"ID":"39da966e-8c22-4165-81e3-be4df8c3e0d8","Type":"ContainerStarted","Data":"59629b0c591bf16c88aaf8f6a8d54939f69ab7993188f9c9cc31e87eae70783b"} Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.976380 4954 generic.go:334] "Generic (PLEG): container finished" podID="6b62bff3-6957-454f-94bc-7a0579e32b2c" containerID="20460640171bbc68d4b9ab2a80f820cfe347588bcc48b0b07488c12d4af2477e" exitCode=0 Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.976477 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" event={"ID":"6b62bff3-6957-454f-94bc-7a0579e32b2c","Type":"ContainerDied","Data":"20460640171bbc68d4b9ab2a80f820cfe347588bcc48b0b07488c12d4af2477e"} Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.976586 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" event={"ID":"6b62bff3-6957-454f-94bc-7a0579e32b2c","Type":"ContainerStarted","Data":"3ab67415c374736819e65596b697eca2bb2aa005e1a2fa75bdea873ab325b0f5"} Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.982987 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684" event={"ID":"1576e0fb-2ebb-475e-abbc-6d4da8aca68c","Type":"ContainerStarted","Data":"50969e10fa0a647ce6b0552651e9fd88afe9de0ab408ec9dc60d151615cf437d"} Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.983017 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684" event={"ID":"1576e0fb-2ebb-475e-abbc-6d4da8aca68c","Type":"ContainerStarted","Data":"e379da794a3648e5b32b2d501007cfe98d5da3851ba950c4d53624a566dd43ea"} Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.983622 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.985078 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" event={"ID":"72eb201a-f91c-4d66-af4f-2c11a40b52a3","Type":"ContainerStarted","Data":"f7907faee6d2eb4fc3fcdca13eb3e6f35510f904f646823f5dc54215e3e917ef"} Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.985113 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" event={"ID":"72eb201a-f91c-4d66-af4f-2c11a40b52a3","Type":"ContainerStarted","Data":"c078b3a9759d3a35914c6779935feea2f25c9c046c04683a7a7430b197dfb262"} Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.991447 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" event={"ID":"76c97c39-16a9-4231-ade3-903f2eb8b4be","Type":"ContainerStarted","Data":"fb88fdfb0ee734d4683587f69f3ea731ea17b6e739b670bec69ec99a2c40f145"} Nov 28 16:12:29 crc kubenswrapper[4954]: I1128 16:12:29.991486 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" event={"ID":"76c97c39-16a9-4231-ade3-903f2eb8b4be","Type":"ContainerStarted","Data":"35b701abcbc31d4e57bfce215793cd6b17204e941b68e028a8e2869ba40423d2"} Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.003540 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.024163 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.044797 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.064443 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.084137 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.118074 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxz6f\" (UniqueName: \"kubernetes.io/projected/2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7-kube-api-access-hxz6f\") pod \"router-default-5444994796-5zg2k\" (UID: \"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7\") " pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.146137 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tcwc\" (UniqueName: \"kubernetes.io/projected/99fb892f-33e1-4ec2-9a56-60461486f2ab-kube-api-access-7tcwc\") pod \"dns-operator-744455d44c-26cld\" (UID: \"99fb892f-33e1-4ec2-9a56-60461486f2ab\") " pod="openshift-dns-operator/dns-operator-744455d44c-26cld" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.163467 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v28wp\" (UniqueName: \"kubernetes.io/projected/9f85396f-69ce-4da8-8b25-2ca4517ff116-kube-api-access-v28wp\") pod \"console-operator-58897d9998-kb4kp\" (UID: \"9f85396f-69ce-4da8-8b25-2ca4517ff116\") " pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.178153 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7xbn\" (UniqueName: \"kubernetes.io/projected/283afc15-304c-45e8-b9c6-7975cb98c34a-kube-api-access-d7xbn\") pod \"cluster-samples-operator-665b6dd947-c4ms2\" (UID: \"283afc15-304c-45e8-b9c6-7975cb98c34a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c4ms2" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.182748 4954 request.go:700] Waited for 1.982042889s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager-operator/serviceaccounts/openshift-controller-manager-operator/token Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.209891 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6jfk\" (UniqueName: \"kubernetes.io/projected/090c4f71-7e7b-4a55-a85b-a1802f9bc398-kube-api-access-v6jfk\") pod \"openshift-controller-manager-operator-756b6f6bc6-txr4x\" (UID: \"090c4f71-7e7b-4a55-a85b-a1802f9bc398\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.223992 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tp2nb\" (UniqueName: \"kubernetes.io/projected/09c13a85-d025-45f4-b81d-35ee15d4dc50-kube-api-access-tp2nb\") pod \"openshift-apiserver-operator-796bbdcf4f-48l9m\" (UID: \"09c13a85-d025-45f4-b81d-35ee15d4dc50\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.237347 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlphn\" (UniqueName: \"kubernetes.io/projected/db28b581-1553-4c72-a11f-fb4c3d67a33f-kube-api-access-nlphn\") pod \"route-controller-manager-6576b87f9c-bdctj\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.247318 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c4ms2" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.258777 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.261070 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/86f49176-9115-4335-b2b9-2b37af823eca-bound-sa-token\") pod \"ingress-operator-5b745b69d9-rfwtj\" (UID: \"86f49176-9115-4335-b2b9-2b37af823eca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.279093 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckhcv\" (UniqueName: \"kubernetes.io/projected/0a163af1-9710-4335-ada6-08578cb822b0-kube-api-access-ckhcv\") pod \"cluster-image-registry-operator-dc59b4c8b-hgqg7\" (UID: \"0a163af1-9710-4335-ada6-08578cb822b0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.300107 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bh2p\" (UniqueName: \"kubernetes.io/projected/35c6d76c-8581-4697-bef1-dca1ba384f43-kube-api-access-4bh2p\") pod \"machine-approver-56656f9798-4zkjb\" (UID: \"35c6d76c-8581-4697-bef1-dca1ba384f43\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.308899 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.326769 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd5gc\" (UniqueName: \"kubernetes.io/projected/529ebb9e-62c6-4708-a8dd-bbe4092d04fb-kube-api-access-qd5gc\") pod \"openshift-config-operator-7777fb866f-hk8pv\" (UID: \"529ebb9e-62c6-4708-a8dd-bbe4092d04fb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.340219 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69qvm\" (UniqueName: \"kubernetes.io/projected/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-kube-api-access-69qvm\") pod \"console-f9d7485db-kw5b6\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.352451 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x" Nov 28 16:12:30 crc kubenswrapper[4954]: W1128 16:12:30.359191 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2ed5b5e7_5b12_49fb_96b7_c52dbb8531d7.slice/crio-32dfd392cbd68b5f2951fbadbca7b9a66898e1aff81353d48fadafa584ffac7b WatchSource:0}: Error finding container 32dfd392cbd68b5f2951fbadbca7b9a66898e1aff81353d48fadafa584ffac7b: Status 404 returned error can't find the container with id 32dfd392cbd68b5f2951fbadbca7b9a66898e1aff81353d48fadafa584ffac7b Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.362037 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9c9vr\" (UniqueName: \"kubernetes.io/projected/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-kube-api-access-9c9vr\") pod \"oauth-openshift-558db77b4-d6kn6\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.369905 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-26cld" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.378708 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwwgq\" (UniqueName: \"kubernetes.io/projected/2f899c69-b027-4354-a239-cac0f4f33164-kube-api-access-bwwgq\") pod \"etcd-operator-b45778765-ghjjk\" (UID: \"2f899c69-b027-4354-a239-cac0f4f33164\") " pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.403398 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0a163af1-9710-4335-ada6-08578cb822b0-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-hgqg7\" (UID: \"0a163af1-9710-4335-ada6-08578cb822b0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.421731 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwjhh\" (UniqueName: \"kubernetes.io/projected/86f49176-9115-4335-b2b9-2b37af823eca-kube-api-access-xwjhh\") pod \"ingress-operator-5b745b69d9-rfwtj\" (UID: \"86f49176-9115-4335-b2b9-2b37af823eca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.439927 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c4ms2"] Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.441004 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.442077 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/31b2aea6-ff5a-4742-8821-b51e2075eea6-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-b564n\" (UID: \"31b2aea6-ff5a-4742-8821-b51e2075eea6\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.453906 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.460683 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.464346 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vhf4\" (UniqueName: \"kubernetes.io/projected/5aba1d28-543f-4f9b-a25d-ceca24889466-kube-api-access-5vhf4\") pod \"downloads-7954f5f757-7g4lh\" (UID: \"5aba1d28-543f-4f9b-a25d-ceca24889466\") " pod="openshift-console/downloads-7954f5f757-7g4lh" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.483644 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-kb4kp"] Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.486711 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghgnk\" (UniqueName: \"kubernetes.io/projected/8f76f36e-152f-4386-bb8b-79a98d8bbfa6-kube-api-access-ghgnk\") pod \"control-plane-machine-set-operator-78cbb6b69f-c9ck6\" (UID: \"8f76f36e-152f-4386-bb8b-79a98d8bbfa6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9ck6" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.491610 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.500031 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/093dcb08-56c5-475b-b31d-00451f608c85-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-xgj9p\" (UID: \"093dcb08-56c5-475b-b31d-00451f608c85\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.521766 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbftz\" (UniqueName: \"kubernetes.io/projected/bd239891-15f6-4d39-bec1-10bb3318c2ee-kube-api-access-xbftz\") pod \"packageserver-d55dfcdfc-v4786\" (UID: \"bd239891-15f6-4d39-bec1-10bb3318c2ee\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:30 crc kubenswrapper[4954]: W1128 16:12:30.522019 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9f85396f_69ce_4da8_8b25_2ca4517ff116.slice/crio-59854eff9276252b47fbc492d9b4e6d558d5b46aa5f3cf6f168541e2632f2485 WatchSource:0}: Error finding container 59854eff9276252b47fbc492d9b4e6d558d5b46aa5f3cf6f168541e2632f2485: Status 404 returned error can't find the container with id 59854eff9276252b47fbc492d9b4e6d558d5b46aa5f3cf6f168541e2632f2485 Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.533896 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.546258 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7sqd\" (UniqueName: \"kubernetes.io/projected/4807ba73-41fb-433c-8097-3fa9ab5b1495-kube-api-access-p7sqd\") pod \"migrator-59844c95c7-58fhh\" (UID: \"4807ba73-41fb-433c-8097-3fa9ab5b1495\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-58fhh" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.563434 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x"] Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.563979 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8sg5r\" (UniqueName: \"kubernetes.io/projected/aaee1061-6c26-48aa-ba83-67ccfbfa67a6-kube-api-access-8sg5r\") pod \"service-ca-operator-777779d784-f45ch\" (UID: \"aaee1061-6c26-48aa-ba83-67ccfbfa67a6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-f45ch" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.576590 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.581032 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kxlp\" (UniqueName: \"kubernetes.io/projected/1f4e9b0c-5e6d-4838-8af9-1921e28f4c68-kube-api-access-8kxlp\") pod \"olm-operator-6b444d44fb-4zwww\" (UID: \"1f4e9b0c-5e6d-4838-8af9-1921e28f4c68\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.587626 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" Nov 28 16:12:30 crc kubenswrapper[4954]: W1128 16:12:30.594647 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod090c4f71_7e7b_4a55_a85b_a1802f9bc398.slice/crio-978e12f6e11bf3f9dbf49c2453848ff67bae070320cb0de93b03a930624234c8 WatchSource:0}: Error finding container 978e12f6e11bf3f9dbf49c2453848ff67bae070320cb0de93b03a930624234c8: Status 404 returned error can't find the container with id 978e12f6e11bf3f9dbf49c2453848ff67bae070320cb0de93b03a930624234c8 Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.594867 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.613395 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttdvc\" (UniqueName: \"kubernetes.io/projected/41770d46-8567-4b2f-b782-910f08b0b373-kube-api-access-ttdvc\") pod \"package-server-manager-789f6589d5-h7crr\" (UID: \"41770d46-8567-4b2f-b782-910f08b0b373\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.619126 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-26cld"] Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.623936 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.640679 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.643204 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs\") pod \"network-metrics-daemon-mrxnm\" (UID: \"19107b9e-d11e-4360-a6c8-dfc9e96a8623\") " pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.645851 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.660458 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.663721 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.667183 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/19107b9e-d11e-4360-a6c8-dfc9e96a8623-metrics-certs\") pod \"network-metrics-daemon-mrxnm\" (UID: \"19107b9e-d11e-4360-a6c8-dfc9e96a8623\") " pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.687181 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-7g4lh" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.688870 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.694542 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.704253 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-mrxnm" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.715769 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj"] Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.724720 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-f45ch" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.737632 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9ck6" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746056 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0e65a9be-8ef8-400b-935d-9ec8a2f51d4a-auth-proxy-config\") pod \"machine-config-operator-74547568cd-j85wr\" (UID: \"0e65a9be-8ef8-400b-935d-9ec8a2f51d4a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746124 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4fa1e392-8e18-4d9a-b369-214965705443-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vt9pp\" (UID: \"4fa1e392-8e18-4d9a-b369-214965705443\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746211 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d42a5371-0448-48d1-8df8-aee818ea6644-trusted-ca\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746249 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9e51474f-c520-42db-ad78-42f143642a7e-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fvflb\" (UID: \"9e51474f-c520-42db-ad78-42f143642a7e\") " pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746271 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59de1205-9501-4117-a463-b51fbe6c33d1-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-npvgc\" (UID: \"59de1205-9501-4117-a463-b51fbe6c33d1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746345 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvqx8\" (UniqueName: \"kubernetes.io/projected/0e65a9be-8ef8-400b-935d-9ec8a2f51d4a-kube-api-access-jvqx8\") pod \"machine-config-operator-74547568cd-j85wr\" (UID: \"0e65a9be-8ef8-400b-935d-9ec8a2f51d4a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746414 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d42a5371-0448-48d1-8df8-aee818ea6644-installation-pull-secrets\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 
Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746451 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmb2f\" (UniqueName: \"kubernetes.io/projected/e8c0d166-30c8-48da-8393-d03c99c1bddd-kube-api-access-dmb2f\") pod \"multus-admission-controller-857f4d67dd-6xcl5\" (UID: \"e8c0d166-30c8-48da-8393-d03c99c1bddd\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6xcl5"
Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746474 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa1e392-8e18-4d9a-b369-214965705443-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vt9pp\" (UID: \"4fa1e392-8e18-4d9a-b369-214965705443\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp"
Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746511 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0e65a9be-8ef8-400b-935d-9ec8a2f51d4a-proxy-tls\") pod \"machine-config-operator-74547568cd-j85wr\" (UID: \"0e65a9be-8ef8-400b-935d-9ec8a2f51d4a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr"
Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746559 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e8c0d166-30c8-48da-8393-d03c99c1bddd-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-6xcl5\" (UID: \"e8c0d166-30c8-48da-8393-d03c99c1bddd\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6xcl5"
Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746598 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9e51474f-c520-42db-ad78-42f143642a7e-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fvflb\" (UID: \"9e51474f-c520-42db-ad78-42f143642a7e\") " pod="openshift-marketplace/marketplace-operator-79b997595-fvflb"
Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746621 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvwmp\" (UniqueName: \"kubernetes.io/projected/59de1205-9501-4117-a463-b51fbe6c33d1-kube-api-access-fvwmp\") pod \"kube-storage-version-migrator-operator-b67b599dd-npvgc\" (UID: \"59de1205-9501-4117-a463-b51fbe6c33d1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc"
Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746644 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/fcd0148c-c50f-40fd-aac6-6f8226ee81ab-srv-cert\") pod \"catalog-operator-68c6474976-pn7n5\" (UID: \"fcd0148c-c50f-40fd-aac6-6f8226ee81ab\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5"
\"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746738 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7lgn\" (UniqueName: \"kubernetes.io/projected/fcd0148c-c50f-40fd-aac6-6f8226ee81ab-kube-api-access-z7lgn\") pod \"catalog-operator-68c6474976-pn7n5\" (UID: \"fcd0148c-c50f-40fd-aac6-6f8226ee81ab\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746763 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-bound-sa-token\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746785 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/fcd0148c-c50f-40fd-aac6-6f8226ee81ab-profile-collector-cert\") pod \"catalog-operator-68c6474976-pn7n5\" (UID: \"fcd0148c-c50f-40fd-aac6-6f8226ee81ab\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746806 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54kfl\" (UniqueName: \"kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-kube-api-access-54kfl\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746824 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59de1205-9501-4117-a463-b51fbe6c33d1-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-npvgc\" (UID: \"59de1205-9501-4117-a463-b51fbe6c33d1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746870 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d42a5371-0448-48d1-8df8-aee818ea6644-ca-trust-extracted\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.746959 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0e65a9be-8ef8-400b-935d-9ec8a2f51d4a-images\") pod \"machine-config-operator-74547568cd-j85wr\" (UID: \"0e65a9be-8ef8-400b-935d-9ec8a2f51d4a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.747045 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4fa1e392-8e18-4d9a-b369-214965705443-serving-cert\") pod 
\"openshift-kube-scheduler-operator-5fdd9b5758-vt9pp\" (UID: \"4fa1e392-8e18-4d9a-b369-214965705443\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.747080 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zs2gn\" (UniqueName: \"kubernetes.io/projected/9e51474f-c520-42db-ad78-42f143642a7e-kube-api-access-zs2gn\") pod \"marketplace-operator-79b997595-fvflb\" (UID: \"9e51474f-c520-42db-ad78-42f143642a7e\") " pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.750220 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.750959 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d42a5371-0448-48d1-8df8-aee818ea6644-registry-certificates\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.751036 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-registry-tls\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: E1128 16:12:30.751955 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:31.251942203 +0000 UTC m=+104.643610744 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.767291 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m"] Nov 28 16:12:30 crc kubenswrapper[4954]: W1128 16:12:30.770840 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb28b581_1553_4c72_a11f_fb4c3d67a33f.slice/crio-bdf99ee384473f946ab3985744836582b11bc412da204e519c3ed245a0e50780 WatchSource:0}: Error finding container bdf99ee384473f946ab3985744836582b11bc412da204e519c3ed245a0e50780: Status 404 returned error can't find the container with id bdf99ee384473f946ab3985744836582b11bc412da204e519c3ed245a0e50780 Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.772715 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.790155 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-d6kn6"] Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.810830 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.820847 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-58fhh" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.852630 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.852789 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thbm4\" (UniqueName: \"kubernetes.io/projected/1c7fb7be-980b-491c-87a1-4495b97fc8e7-kube-api-access-thbm4\") pod \"machine-config-server-zmmsk\" (UID: \"1c7fb7be-980b-491c-87a1-4495b97fc8e7\") " pod="openshift-machine-config-operator/machine-config-server-zmmsk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.852820 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvqx8\" (UniqueName: \"kubernetes.io/projected/0e65a9be-8ef8-400b-935d-9ec8a2f51d4a-kube-api-access-jvqx8\") pod \"machine-config-operator-74547568cd-j85wr\" (UID: \"0e65a9be-8ef8-400b-935d-9ec8a2f51d4a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.852882 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f18abd7-b65d-47d8-af3e-44008988873e-secret-volume\") pod \"collect-profiles-29405760-q4ftl\" (UID: \"2f18abd7-b65d-47d8-af3e-44008988873e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.852939 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d42a5371-0448-48d1-8df8-aee818ea6644-installation-pull-secrets\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.852963 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmb2f\" (UniqueName: \"kubernetes.io/projected/e8c0d166-30c8-48da-8393-d03c99c1bddd-kube-api-access-dmb2f\") pod \"multus-admission-controller-857f4d67dd-6xcl5\" (UID: \"e8c0d166-30c8-48da-8393-d03c99c1bddd\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6xcl5" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.852979 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa1e392-8e18-4d9a-b369-214965705443-config\") 
pod \"openshift-kube-scheduler-operator-5fdd9b5758-vt9pp\" (UID: \"4fa1e392-8e18-4d9a-b369-214965705443\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.852998 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0e65a9be-8ef8-400b-935d-9ec8a2f51d4a-proxy-tls\") pod \"machine-config-operator-74547568cd-j85wr\" (UID: \"0e65a9be-8ef8-400b-935d-9ec8a2f51d4a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.853013 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-772hj\" (UniqueName: \"kubernetes.io/projected/d7adf89c-ec5d-478c-b3eb-842a5994aada-kube-api-access-772hj\") pod \"machine-config-controller-84d6567774-rhxlq\" (UID: \"d7adf89c-ec5d-478c-b3eb-842a5994aada\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.853029 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktqq2\" (UniqueName: \"kubernetes.io/projected/9d51e5f1-a57d-4c66-b8b3-c29c966af005-kube-api-access-ktqq2\") pod \"ingress-canary-9wjhp\" (UID: \"9d51e5f1-a57d-4c66-b8b3-c29c966af005\") " pod="openshift-ingress-canary/ingress-canary-9wjhp" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.853045 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e8c0d166-30c8-48da-8393-d03c99c1bddd-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-6xcl5\" (UID: \"e8c0d166-30c8-48da-8393-d03c99c1bddd\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6xcl5" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.853069 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9e51474f-c520-42db-ad78-42f143642a7e-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fvflb\" (UID: \"9e51474f-c520-42db-ad78-42f143642a7e\") " pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" Nov 28 16:12:30 crc kubenswrapper[4954]: E1128 16:12:30.853417 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:31.353396014 +0000 UTC m=+104.745064555 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.854660 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvwmp\" (UniqueName: \"kubernetes.io/projected/59de1205-9501-4117-a463-b51fbe6c33d1-kube-api-access-fvwmp\") pod \"kube-storage-version-migrator-operator-b67b599dd-npvgc\" (UID: \"59de1205-9501-4117-a463-b51fbe6c33d1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.854727 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/fcd0148c-c50f-40fd-aac6-6f8226ee81ab-srv-cert\") pod \"catalog-operator-68c6474976-pn7n5\" (UID: \"fcd0148c-c50f-40fd-aac6-6f8226ee81ab\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.854753 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/78fe2f29-be96-4c11-939b-775929f004f3-registration-dir\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.854785 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.854811 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfpt5\" (UniqueName: \"kubernetes.io/projected/78fe2f29-be96-4c11-939b-775929f004f3-kube-api-access-cfpt5\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.854832 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-bound-sa-token\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.854851 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7lgn\" (UniqueName: \"kubernetes.io/projected/fcd0148c-c50f-40fd-aac6-6f8226ee81ab-kube-api-access-z7lgn\") pod \"catalog-operator-68c6474976-pn7n5\" (UID: \"fcd0148c-c50f-40fd-aac6-6f8226ee81ab\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 
16:12:30.854872 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/1c7fb7be-980b-491c-87a1-4495b97fc8e7-node-bootstrap-token\") pod \"machine-config-server-zmmsk\" (UID: \"1c7fb7be-980b-491c-87a1-4495b97fc8e7\") " pod="openshift-machine-config-operator/machine-config-server-zmmsk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.854889 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/78fe2f29-be96-4c11-939b-775929f004f3-plugins-dir\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.854914 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/fcd0148c-c50f-40fd-aac6-6f8226ee81ab-profile-collector-cert\") pod \"catalog-operator-68c6474976-pn7n5\" (UID: \"fcd0148c-c50f-40fd-aac6-6f8226ee81ab\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.854945 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54kfl\" (UniqueName: \"kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-kube-api-access-54kfl\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.854964 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59de1205-9501-4117-a463-b51fbe6c33d1-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-npvgc\" (UID: \"59de1205-9501-4117-a463-b51fbe6c33d1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.854995 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d42a5371-0448-48d1-8df8-aee818ea6644-ca-trust-extracted\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855235 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/78fe2f29-be96-4c11-939b-775929f004f3-mountpoint-dir\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855304 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0e65a9be-8ef8-400b-935d-9ec8a2f51d4a-images\") pod \"machine-config-operator-74547568cd-j85wr\" (UID: \"0e65a9be-8ef8-400b-935d-9ec8a2f51d4a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855355 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" 
(UniqueName: \"kubernetes.io/secret/9d51e5f1-a57d-4c66-b8b3-c29c966af005-cert\") pod \"ingress-canary-9wjhp\" (UID: \"9d51e5f1-a57d-4c66-b8b3-c29c966af005\") " pod="openshift-ingress-canary/ingress-canary-9wjhp" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855376 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2k2m\" (UniqueName: \"kubernetes.io/projected/160c98bc-d26b-4d97-af06-9d0cfbba17a3-kube-api-access-p2k2m\") pod \"dns-default-sdtpv\" (UID: \"160c98bc-d26b-4d97-af06-9d0cfbba17a3\") " pod="openshift-dns/dns-default-sdtpv" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855423 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fa1e392-8e18-4d9a-b369-214965705443-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vt9pp\" (UID: \"4fa1e392-8e18-4d9a-b369-214965705443\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855451 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/1c7fb7be-980b-491c-87a1-4495b97fc8e7-certs\") pod \"machine-config-server-zmmsk\" (UID: \"1c7fb7be-980b-491c-87a1-4495b97fc8e7\") " pod="openshift-machine-config-operator/machine-config-server-zmmsk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855515 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/160c98bc-d26b-4d97-af06-9d0cfbba17a3-metrics-tls\") pod \"dns-default-sdtpv\" (UID: \"160c98bc-d26b-4d97-af06-9d0cfbba17a3\") " pod="openshift-dns/dns-default-sdtpv" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855583 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4fa1e392-8e18-4d9a-b369-214965705443-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vt9pp\" (UID: \"4fa1e392-8e18-4d9a-b369-214965705443\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855602 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/160c98bc-d26b-4d97-af06-9d0cfbba17a3-config-volume\") pod \"dns-default-sdtpv\" (UID: \"160c98bc-d26b-4d97-af06-9d0cfbba17a3\") " pod="openshift-dns/dns-default-sdtpv" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855686 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zs2gn\" (UniqueName: \"kubernetes.io/projected/9e51474f-c520-42db-ad78-42f143642a7e-kube-api-access-zs2gn\") pod \"marketplace-operator-79b997595-fvflb\" (UID: \"9e51474f-c520-42db-ad78-42f143642a7e\") " pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855710 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/78fe2f29-be96-4c11-939b-775929f004f3-csi-data-dir\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc 
kubenswrapper[4954]: I1128 16:12:30.855727 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f18abd7-b65d-47d8-af3e-44008988873e-config-volume\") pod \"collect-profiles-29405760-q4ftl\" (UID: \"2f18abd7-b65d-47d8-af3e-44008988873e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855772 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-registry-tls\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855791 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d42a5371-0448-48d1-8df8-aee818ea6644-registry-certificates\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855836 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hfc7\" (UniqueName: \"kubernetes.io/projected/2f18abd7-b65d-47d8-af3e-44008988873e-kube-api-access-9hfc7\") pod \"collect-profiles-29405760-q4ftl\" (UID: \"2f18abd7-b65d-47d8-af3e-44008988873e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855869 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvp56\" (UniqueName: \"kubernetes.io/projected/e6444de3-0e80-40f8-812a-19620725657c-kube-api-access-vvp56\") pod \"service-ca-9c57cc56f-2wdfl\" (UID: \"e6444de3-0e80-40f8-812a-19620725657c\") " pod="openshift-service-ca/service-ca-9c57cc56f-2wdfl" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855943 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0e65a9be-8ef8-400b-935d-9ec8a2f51d4a-auth-proxy-config\") pod \"machine-config-operator-74547568cd-j85wr\" (UID: \"0e65a9be-8ef8-400b-935d-9ec8a2f51d4a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.855984 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d7adf89c-ec5d-478c-b3eb-842a5994aada-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-rhxlq\" (UID: \"d7adf89c-ec5d-478c-b3eb-842a5994aada\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.856032 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e6444de3-0e80-40f8-812a-19620725657c-signing-cabundle\") pod \"service-ca-9c57cc56f-2wdfl\" (UID: \"e6444de3-0e80-40f8-812a-19620725657c\") " pod="openshift-service-ca/service-ca-9c57cc56f-2wdfl" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.856072 4954 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4fa1e392-8e18-4d9a-b369-214965705443-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vt9pp\" (UID: \"4fa1e392-8e18-4d9a-b369-214965705443\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.856099 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/78fe2f29-be96-4c11-939b-775929f004f3-socket-dir\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.856148 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e6444de3-0e80-40f8-812a-19620725657c-signing-key\") pod \"service-ca-9c57cc56f-2wdfl\" (UID: \"e6444de3-0e80-40f8-812a-19620725657c\") " pod="openshift-service-ca/service-ca-9c57cc56f-2wdfl" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.856171 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d42a5371-0448-48d1-8df8-aee818ea6644-trusted-ca\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.856203 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d7adf89c-ec5d-478c-b3eb-842a5994aada-proxy-tls\") pod \"machine-config-controller-84d6567774-rhxlq\" (UID: \"d7adf89c-ec5d-478c-b3eb-842a5994aada\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.858212 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0e65a9be-8ef8-400b-935d-9ec8a2f51d4a-auth-proxy-config\") pod \"machine-config-operator-74547568cd-j85wr\" (UID: \"0e65a9be-8ef8-400b-935d-9ec8a2f51d4a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" Nov 28 16:12:30 crc kubenswrapper[4954]: E1128 16:12:30.858714 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:31.358701196 +0000 UTC m=+104.750369737 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.864936 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9e51474f-c520-42db-ad78-42f143642a7e-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fvflb\" (UID: \"9e51474f-c520-42db-ad78-42f143642a7e\") " pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.864968 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59de1205-9501-4117-a463-b51fbe6c33d1-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-npvgc\" (UID: \"59de1205-9501-4117-a463-b51fbe6c33d1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.864994 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59de1205-9501-4117-a463-b51fbe6c33d1-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-npvgc\" (UID: \"59de1205-9501-4117-a463-b51fbe6c33d1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.860609 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d42a5371-0448-48d1-8df8-aee818ea6644-ca-trust-extracted\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.863978 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/fcd0148c-c50f-40fd-aac6-6f8226ee81ab-srv-cert\") pod \"catalog-operator-68c6474976-pn7n5\" (UID: \"fcd0148c-c50f-40fd-aac6-6f8226ee81ab\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.864492 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0e65a9be-8ef8-400b-935d-9ec8a2f51d4a-images\") pod \"machine-config-operator-74547568cd-j85wr\" (UID: \"0e65a9be-8ef8-400b-935d-9ec8a2f51d4a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.876220 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9e51474f-c520-42db-ad78-42f143642a7e-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fvflb\" (UID: \"9e51474f-c520-42db-ad78-42f143642a7e\") " pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.876417 4954 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d42a5371-0448-48d1-8df8-aee818ea6644-trusted-ca\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.877560 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d42a5371-0448-48d1-8df8-aee818ea6644-registry-certificates\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.877568 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9e51474f-c520-42db-ad78-42f143642a7e-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fvflb\" (UID: \"9e51474f-c520-42db-ad78-42f143642a7e\") " pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.883221 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59de1205-9501-4117-a463-b51fbe6c33d1-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-npvgc\" (UID: \"59de1205-9501-4117-a463-b51fbe6c33d1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.884431 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4fa1e392-8e18-4d9a-b369-214965705443-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vt9pp\" (UID: \"4fa1e392-8e18-4d9a-b369-214965705443\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.884452 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-registry-tls\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.887128 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d42a5371-0448-48d1-8df8-aee818ea6644-installation-pull-secrets\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.890567 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/fcd0148c-c50f-40fd-aac6-6f8226ee81ab-profile-collector-cert\") pod \"catalog-operator-68c6474976-pn7n5\" (UID: \"fcd0148c-c50f-40fd-aac6-6f8226ee81ab\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.892757 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/0e65a9be-8ef8-400b-935d-9ec8a2f51d4a-proxy-tls\") pod \"machine-config-operator-74547568cd-j85wr\" (UID: \"0e65a9be-8ef8-400b-935d-9ec8a2f51d4a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.898346 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/e8c0d166-30c8-48da-8393-d03c99c1bddd-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-6xcl5\" (UID: \"e8c0d166-30c8-48da-8393-d03c99c1bddd\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6xcl5" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.906718 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvqx8\" (UniqueName: \"kubernetes.io/projected/0e65a9be-8ef8-400b-935d-9ec8a2f51d4a-kube-api-access-jvqx8\") pod \"machine-config-operator-74547568cd-j85wr\" (UID: \"0e65a9be-8ef8-400b-935d-9ec8a2f51d4a\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.919294 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmb2f\" (UniqueName: \"kubernetes.io/projected/e8c0d166-30c8-48da-8393-d03c99c1bddd-kube-api-access-dmb2f\") pod \"multus-admission-controller-857f4d67dd-6xcl5\" (UID: \"e8c0d166-30c8-48da-8393-d03c99c1bddd\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6xcl5" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.940679 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvwmp\" (UniqueName: \"kubernetes.io/projected/59de1205-9501-4117-a463-b51fbe6c33d1-kube-api-access-fvwmp\") pod \"kube-storage-version-migrator-operator-b67b599dd-npvgc\" (UID: \"59de1205-9501-4117-a463-b51fbe6c33d1\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.966565 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zs2gn\" (UniqueName: \"kubernetes.io/projected/9e51474f-c520-42db-ad78-42f143642a7e-kube-api-access-zs2gn\") pod \"marketplace-operator-79b997595-fvflb\" (UID: \"9e51474f-c520-42db-ad78-42f143642a7e\") " pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974127 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974300 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-772hj\" (UniqueName: \"kubernetes.io/projected/d7adf89c-ec5d-478c-b3eb-842a5994aada-kube-api-access-772hj\") pod \"machine-config-controller-84d6567774-rhxlq\" (UID: \"d7adf89c-ec5d-478c-b3eb-842a5994aada\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974340 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktqq2\" (UniqueName: 
\"kubernetes.io/projected/9d51e5f1-a57d-4c66-b8b3-c29c966af005-kube-api-access-ktqq2\") pod \"ingress-canary-9wjhp\" (UID: \"9d51e5f1-a57d-4c66-b8b3-c29c966af005\") " pod="openshift-ingress-canary/ingress-canary-9wjhp" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974374 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/78fe2f29-be96-4c11-939b-775929f004f3-registration-dir\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974411 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfpt5\" (UniqueName: \"kubernetes.io/projected/78fe2f29-be96-4c11-939b-775929f004f3-kube-api-access-cfpt5\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974435 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/1c7fb7be-980b-491c-87a1-4495b97fc8e7-node-bootstrap-token\") pod \"machine-config-server-zmmsk\" (UID: \"1c7fb7be-980b-491c-87a1-4495b97fc8e7\") " pod="openshift-machine-config-operator/machine-config-server-zmmsk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974459 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/78fe2f29-be96-4c11-939b-775929f004f3-plugins-dir\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974518 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/78fe2f29-be96-4c11-939b-775929f004f3-mountpoint-dir\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974566 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9d51e5f1-a57d-4c66-b8b3-c29c966af005-cert\") pod \"ingress-canary-9wjhp\" (UID: \"9d51e5f1-a57d-4c66-b8b3-c29c966af005\") " pod="openshift-ingress-canary/ingress-canary-9wjhp" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974589 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2k2m\" (UniqueName: \"kubernetes.io/projected/160c98bc-d26b-4d97-af06-9d0cfbba17a3-kube-api-access-p2k2m\") pod \"dns-default-sdtpv\" (UID: \"160c98bc-d26b-4d97-af06-9d0cfbba17a3\") " pod="openshift-dns/dns-default-sdtpv" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974616 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/1c7fb7be-980b-491c-87a1-4495b97fc8e7-certs\") pod \"machine-config-server-zmmsk\" (UID: \"1c7fb7be-980b-491c-87a1-4495b97fc8e7\") " pod="openshift-machine-config-operator/machine-config-server-zmmsk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974641 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/160c98bc-d26b-4d97-af06-9d0cfbba17a3-metrics-tls\") pod \"dns-default-sdtpv\" (UID: \"160c98bc-d26b-4d97-af06-9d0cfbba17a3\") " pod="openshift-dns/dns-default-sdtpv" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974663 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/160c98bc-d26b-4d97-af06-9d0cfbba17a3-config-volume\") pod \"dns-default-sdtpv\" (UID: \"160c98bc-d26b-4d97-af06-9d0cfbba17a3\") " pod="openshift-dns/dns-default-sdtpv" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974708 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/78fe2f29-be96-4c11-939b-775929f004f3-csi-data-dir\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974732 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f18abd7-b65d-47d8-af3e-44008988873e-config-volume\") pod \"collect-profiles-29405760-q4ftl\" (UID: \"2f18abd7-b65d-47d8-af3e-44008988873e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974778 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hfc7\" (UniqueName: \"kubernetes.io/projected/2f18abd7-b65d-47d8-af3e-44008988873e-kube-api-access-9hfc7\") pod \"collect-profiles-29405760-q4ftl\" (UID: \"2f18abd7-b65d-47d8-af3e-44008988873e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974803 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvp56\" (UniqueName: \"kubernetes.io/projected/e6444de3-0e80-40f8-812a-19620725657c-kube-api-access-vvp56\") pod \"service-ca-9c57cc56f-2wdfl\" (UID: \"e6444de3-0e80-40f8-812a-19620725657c\") " pod="openshift-service-ca/service-ca-9c57cc56f-2wdfl" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974832 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d7adf89c-ec5d-478c-b3eb-842a5994aada-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-rhxlq\" (UID: \"d7adf89c-ec5d-478c-b3eb-842a5994aada\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974858 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e6444de3-0e80-40f8-812a-19620725657c-signing-cabundle\") pod \"service-ca-9c57cc56f-2wdfl\" (UID: \"e6444de3-0e80-40f8-812a-19620725657c\") " pod="openshift-service-ca/service-ca-9c57cc56f-2wdfl" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974893 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/78fe2f29-be96-4c11-939b-775929f004f3-socket-dir\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974931 4954 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e6444de3-0e80-40f8-812a-19620725657c-signing-key\") pod \"service-ca-9c57cc56f-2wdfl\" (UID: \"e6444de3-0e80-40f8-812a-19620725657c\") " pod="openshift-service-ca/service-ca-9c57cc56f-2wdfl" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974959 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d7adf89c-ec5d-478c-b3eb-842a5994aada-proxy-tls\") pod \"machine-config-controller-84d6567774-rhxlq\" (UID: \"d7adf89c-ec5d-478c-b3eb-842a5994aada\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.974989 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thbm4\" (UniqueName: \"kubernetes.io/projected/1c7fb7be-980b-491c-87a1-4495b97fc8e7-kube-api-access-thbm4\") pod \"machine-config-server-zmmsk\" (UID: \"1c7fb7be-980b-491c-87a1-4495b97fc8e7\") " pod="openshift-machine-config-operator/machine-config-server-zmmsk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.975018 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f18abd7-b65d-47d8-af3e-44008988873e-secret-volume\") pod \"collect-profiles-29405760-q4ftl\" (UID: \"2f18abd7-b65d-47d8-af3e-44008988873e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" Nov 28 16:12:30 crc kubenswrapper[4954]: E1128 16:12:30.975664 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:31.475596506 +0000 UTC m=+104.867265047 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.976163 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/78fe2f29-be96-4c11-939b-775929f004f3-registration-dir\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.978737 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/78fe2f29-be96-4c11-939b-775929f004f3-plugins-dir\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.978817 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/78fe2f29-be96-4c11-939b-775929f004f3-mountpoint-dir\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.979004 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/1c7fb7be-980b-491c-87a1-4495b97fc8e7-node-bootstrap-token\") pod \"machine-config-server-zmmsk\" (UID: \"1c7fb7be-980b-491c-87a1-4495b97fc8e7\") " pod="openshift-machine-config-operator/machine-config-server-zmmsk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.979811 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d7adf89c-ec5d-478c-b3eb-842a5994aada-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-rhxlq\" (UID: \"d7adf89c-ec5d-478c-b3eb-842a5994aada\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.980842 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7lgn\" (UniqueName: \"kubernetes.io/projected/fcd0148c-c50f-40fd-aac6-6f8226ee81ab-kube-api-access-z7lgn\") pod \"catalog-operator-68c6474976-pn7n5\" (UID: \"fcd0148c-c50f-40fd-aac6-6f8226ee81ab\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.981262 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e6444de3-0e80-40f8-812a-19620725657c-signing-cabundle\") pod \"service-ca-9c57cc56f-2wdfl\" (UID: \"e6444de3-0e80-40f8-812a-19620725657c\") " pod="openshift-service-ca/service-ca-9c57cc56f-2wdfl" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.981323 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/78fe2f29-be96-4c11-939b-775929f004f3-socket-dir\") pod 
\"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.981384 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/78fe2f29-be96-4c11-939b-775929f004f3-csi-data-dir\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.981470 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/160c98bc-d26b-4d97-af06-9d0cfbba17a3-config-volume\") pod \"dns-default-sdtpv\" (UID: \"160c98bc-d26b-4d97-af06-9d0cfbba17a3\") " pod="openshift-dns/dns-default-sdtpv" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.981497 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f18abd7-b65d-47d8-af3e-44008988873e-config-volume\") pod \"collect-profiles-29405760-q4ftl\" (UID: \"2f18abd7-b65d-47d8-af3e-44008988873e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.982245 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f18abd7-b65d-47d8-af3e-44008988873e-secret-volume\") pod \"collect-profiles-29405760-q4ftl\" (UID: \"2f18abd7-b65d-47d8-af3e-44008988873e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.985387 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9d51e5f1-a57d-4c66-b8b3-c29c966af005-cert\") pod \"ingress-canary-9wjhp\" (UID: \"9d51e5f1-a57d-4c66-b8b3-c29c966af005\") " pod="openshift-ingress-canary/ingress-canary-9wjhp" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.986141 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/160c98bc-d26b-4d97-af06-9d0cfbba17a3-metrics-tls\") pod \"dns-default-sdtpv\" (UID: \"160c98bc-d26b-4d97-af06-9d0cfbba17a3\") " pod="openshift-dns/dns-default-sdtpv" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.986207 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/1c7fb7be-980b-491c-87a1-4495b97fc8e7-certs\") pod \"machine-config-server-zmmsk\" (UID: \"1c7fb7be-980b-491c-87a1-4495b97fc8e7\") " pod="openshift-machine-config-operator/machine-config-server-zmmsk" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.986611 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e6444de3-0e80-40f8-812a-19620725657c-signing-key\") pod \"service-ca-9c57cc56f-2wdfl\" (UID: \"e6444de3-0e80-40f8-812a-19620725657c\") " pod="openshift-service-ca/service-ca-9c57cc56f-2wdfl" Nov 28 16:12:30 crc kubenswrapper[4954]: I1128 16:12:30.987915 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d7adf89c-ec5d-478c-b3eb-842a5994aada-proxy-tls\") pod \"machine-config-controller-84d6567774-rhxlq\" (UID: \"d7adf89c-ec5d-478c-b3eb-842a5994aada\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:30.999973 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-5zg2k" event={"ID":"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7","Type":"ContainerStarted","Data":"5afa70902f31bb9cc25a90cc1eda9d5672790f84dcbb948bedc659d712a2e570"} Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.000023 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-5zg2k" event={"ID":"2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7","Type":"ContainerStarted","Data":"32dfd392cbd68b5f2951fbadbca7b9a66898e1aff81353d48fadafa584ffac7b"} Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.000460 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54kfl\" (UniqueName: \"kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-kube-api-access-54kfl\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.001811 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.006683 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-26cld" event={"ID":"99fb892f-33e1-4ec2-9a56-60461486f2ab","Type":"ContainerStarted","Data":"7e014aef9392db5faeb41cc145d22c86d6f45cd6c39e84702755a48eb6a8541c"} Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.008497 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m" event={"ID":"09c13a85-d025-45f4-b81d-35ee15d4dc50","Type":"ContainerStarted","Data":"6f7dd9b1d333cad79c28406cfa2e9732b89ff2da8288eb05086c94440efbd713"} Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.013430 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" event={"ID":"39da966e-8c22-4165-81e3-be4df8c3e0d8","Type":"ContainerStarted","Data":"4a2f0ffcc7eae171f9dcfe35cb10b9d1c59533a5a4af8ef213fb3f9ce88622d9"} Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.015001 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x" event={"ID":"090c4f71-7e7b-4a55-a85b-a1802f9bc398","Type":"ContainerStarted","Data":"978e12f6e11bf3f9dbf49c2453848ff67bae070320cb0de93b03a930624234c8"} Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.016798 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" event={"ID":"db28b581-1553-4c72-a11f-fb4c3d67a33f","Type":"ContainerStarted","Data":"bdf99ee384473f946ab3985744836582b11bc412da204e519c3ed245a0e50780"} Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.018491 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" event={"ID":"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba","Type":"ContainerStarted","Data":"2b7ddbf9d0ce8fbca4012e3b71157172e5dc90bc7ac30d2f2ffd3e945f5b7880"} Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.019062 4954 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv"] Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.021010 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-kb4kp" event={"ID":"9f85396f-69ce-4da8-8b25-2ca4517ff116","Type":"ContainerStarted","Data":"59854eff9276252b47fbc492d9b4e6d558d5b46aa5f3cf6f168541e2632f2485"} Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.022396 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" event={"ID":"35c6d76c-8581-4697-bef1-dca1ba384f43","Type":"ContainerStarted","Data":"e22fb942ad9fe7c9ecd605b2f9b3235f313abb4ab61ccb44f87fb295b305eab3"} Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.026142 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-bound-sa-token\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.026416 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c4ms2" event={"ID":"283afc15-304c-45e8-b9c6-7975cb98c34a","Type":"ContainerStarted","Data":"d758efe5379c060f3be47ce1eb98ec2ae636212b4af8d872e7f1b7c6d2131c86"} Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.026466 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.034265 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684" event={"ID":"1576e0fb-2ebb-475e-abbc-6d4da8aca68c","Type":"ContainerStarted","Data":"0cc7a97f04e910af7c2c9ad655f2c08a13b4622ff00d7e9eeabda96dae4e671a"} Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.034310 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.040771 4954 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-2ckdw container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.040825 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" podUID="76c97c39-16a9-4231-ade3-903f2eb8b4be" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.041201 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4fa1e392-8e18-4d9a-b369-214965705443-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vt9pp\" (UID: \"4fa1e392-8e18-4d9a-b369-214965705443\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.056286 4954 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.075016 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-mrxnm"] Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.079688 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:31 crc kubenswrapper[4954]: E1128 16:12:31.080498 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:31.580483748 +0000 UTC m=+104.972152279 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.084166 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-772hj\" (UniqueName: \"kubernetes.io/projected/d7adf89c-ec5d-478c-b3eb-842a5994aada-kube-api-access-772hj\") pod \"machine-config-controller-84d6567774-rhxlq\" (UID: \"d7adf89c-ec5d-478c-b3eb-842a5994aada\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.084550 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.097957 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-6xcl5" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.106118 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktqq2\" (UniqueName: \"kubernetes.io/projected/9d51e5f1-a57d-4c66-b8b3-c29c966af005-kube-api-access-ktqq2\") pod \"ingress-canary-9wjhp\" (UID: \"9d51e5f1-a57d-4c66-b8b3-c29c966af005\") " pod="openshift-ingress-canary/ingress-canary-9wjhp" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.119384 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfpt5\" (UniqueName: \"kubernetes.io/projected/78fe2f29-be96-4c11-939b-775929f004f3-kube-api-access-cfpt5\") pod \"csi-hostpathplugin-wbnjk\" (UID: \"78fe2f29-be96-4c11-939b-775929f004f3\") " pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.145142 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.145166 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7"] Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.169027 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2k2m\" (UniqueName: \"kubernetes.io/projected/160c98bc-d26b-4d97-af06-9d0cfbba17a3-kube-api-access-p2k2m\") pod \"dns-default-sdtpv\" (UID: \"160c98bc-d26b-4d97-af06-9d0cfbba17a3\") " pod="openshift-dns/dns-default-sdtpv" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.169897 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.172220 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hfc7\" (UniqueName: \"kubernetes.io/projected/2f18abd7-b65d-47d8-af3e-44008988873e-kube-api-access-9hfc7\") pod \"collect-profiles-29405760-q4ftl\" (UID: \"2f18abd7-b65d-47d8-af3e-44008988873e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.172297 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n"] Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.173941 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj"] Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.184200 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-9wjhp" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.184934 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:31 crc kubenswrapper[4954]: E1128 16:12:31.186608 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:31.686591959 +0000 UTC m=+105.078260500 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.195390 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvp56\" (UniqueName: \"kubernetes.io/projected/e6444de3-0e80-40f8-812a-19620725657c-kube-api-access-vvp56\") pod \"service-ca-9c57cc56f-2wdfl\" (UID: \"e6444de3-0e80-40f8-812a-19620725657c\") " pod="openshift-service-ca/service-ca-9c57cc56f-2wdfl" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.200150 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-2wdfl" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.204298 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thbm4\" (UniqueName: \"kubernetes.io/projected/1c7fb7be-980b-491c-87a1-4495b97fc8e7-kube-api-access-thbm4\") pod \"machine-config-server-zmmsk\" (UID: \"1c7fb7be-980b-491c-87a1-4495b97fc8e7\") " pod="openshift-machine-config-operator/machine-config-server-zmmsk" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.213177 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-sdtpv" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.213193 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.228339 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-kw5b6"] Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.232816 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p"] Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.285988 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:31 crc kubenswrapper[4954]: E1128 16:12:31.286308 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:31.786296722 +0000 UTC m=+105.177965263 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:31 crc kubenswrapper[4954]: W1128 16:12:31.305710 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod19107b9e_d11e_4360_a6c8_dfc9e96a8623.slice/crio-890b6b7af04f2236ba1502f84f72f7aeee289c213aabfd0e8640c38f33ef9f77 WatchSource:0}: Error finding container 890b6b7af04f2236ba1502f84f72f7aeee289c213aabfd0e8640c38f33ef9f77: Status 404 returned error can't find the container with id 890b6b7af04f2236ba1502f84f72f7aeee289c213aabfd0e8640c38f33ef9f77 Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.309901 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.310240 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.337438 4954 patch_prober.go:28] interesting pod/router-default-5444994796-5zg2k container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.337504 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5zg2k" podUID="2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.387399 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:31 crc kubenswrapper[4954]: E1128 16:12:31.387741 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:31.887714202 +0000 UTC m=+105.279382753 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.389321 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:31 crc kubenswrapper[4954]: E1128 16:12:31.389674 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:31.889665316 +0000 UTC m=+105.281333857 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.472466 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-7g4lh"] Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.487204 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-zmmsk" Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.491480 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:31 crc kubenswrapper[4954]: E1128 16:12:31.491926 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:31.991906911 +0000 UTC m=+105.383575462 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.593443 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:31 crc kubenswrapper[4954]: E1128 16:12:31.594107 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:32.094096515 +0000 UTC m=+105.485765056 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.600636 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-f45ch"] Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.618793 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-ghjjk"] Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.696395 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:31 crc kubenswrapper[4954]: E1128 16:12:31.696825 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:32.196807326 +0000 UTC m=+105.588475867 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.797987 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:31 crc kubenswrapper[4954]: E1128 16:12:31.798723 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:32.29871267 +0000 UTC m=+105.690381201 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.899242 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:31 crc kubenswrapper[4954]: E1128 16:12:31.899508 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:32.399475588 +0000 UTC m=+105.791144129 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.899567 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:31 crc kubenswrapper[4954]: E1128 16:12:31.900002 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:32.399996095 +0000 UTC m=+105.791664636 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.921087 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9ck6"] Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.963546 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786"] Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.967748 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr"] Nov 28 16:12:31 crc kubenswrapper[4954]: I1128 16:12:31.996230 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-58fhh"] Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.007446 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:32 crc kubenswrapper[4954]: E1128 16:12:32.007653 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:32.507639827 +0000 UTC m=+105.899308368 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.007836 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:32 crc kubenswrapper[4954]: E1128 16:12:32.010838 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:32.51082187 +0000 UTC m=+105.902490411 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.021411 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww"] Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.057138 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" event={"ID":"529ebb9e-62c6-4708-a8dd-bbe4092d04fb","Type":"ContainerStarted","Data":"59aecdbe804b9adbcabb28a67087a01b4de40ba31835b9a1b070aac9db74d097"} Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.078810 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5"] Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.080703 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p" event={"ID":"093dcb08-56c5-475b-b31d-00451f608c85","Type":"ContainerStarted","Data":"1fa7ad0bd753d810eb742c9dc43a10ceb71a9601e731e80cb9f3e55b1b20ae0a"} Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.083088 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" event={"ID":"39da966e-8c22-4165-81e3-be4df8c3e0d8","Type":"ContainerStarted","Data":"6856b2fb78697540d7f36390801446db018b5f72835e7e847fbf333284cf85a7"} Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.084554 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-2wdfl"] Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.085217 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" 
event={"ID":"35c6d76c-8581-4697-bef1-dca1ba384f43","Type":"ContainerStarted","Data":"db163c88390a5474316bc4aa068a70f97db5adc960852648c42e2cfb21c09e0c"} Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.089571 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" event={"ID":"86f49176-9115-4335-b2b9-2b37af823eca","Type":"ContainerStarted","Data":"3b63cd9a781d142520e1a71e2de2d602f30867a998ac402ae45b356ba13de45e"} Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.091345 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-kw5b6" event={"ID":"ecfd366b-1ead-4b0a-9591-1d69197fe1a3","Type":"ContainerStarted","Data":"9a3b0967ef183bb34e14156ac1d66c858b0a2587bf173dd3bdebb91da69f4978"} Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.111142 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:32 crc kubenswrapper[4954]: E1128 16:12:32.111818 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:32.611804825 +0000 UTC m=+106.003473366 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.114600 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-kb4kp" event={"ID":"9f85396f-69ce-4da8-8b25-2ca4517ff116","Type":"ContainerStarted","Data":"12160bd1a3cfdd4711358595a8265cf250dd16764efabd5b8ce0bea430f38a23"} Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.114626 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.117798 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" event={"ID":"19107b9e-d11e-4360-a6c8-dfc9e96a8623","Type":"ContainerStarted","Data":"890b6b7af04f2236ba1502f84f72f7aeee289c213aabfd0e8640c38f33ef9f77"} Nov 28 16:12:32 crc kubenswrapper[4954]: W1128 16:12:32.126619 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1f4e9b0c_5e6d_4838_8af9_1921e28f4c68.slice/crio-202f6da89dc50d9705d2cbe042344365d92790da4d80d58fe8e7b7aac10689b7 WatchSource:0}: Error finding container 202f6da89dc50d9705d2cbe042344365d92790da4d80d58fe8e7b7aac10689b7: Status 404 returned error can't find the container with id 202f6da89dc50d9705d2cbe042344365d92790da4d80d58fe8e7b7aac10689b7 Nov 28 16:12:32 crc kubenswrapper[4954]: W1128 16:12:32.127201 4954 manager.go:1169] Failed to process watch 
event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfcd0148c_c50f_40fd_aac6_6f8226ee81ab.slice/crio-122faac1a5cd535bc286116859cbbe0120e06aaa950c3f79c2c9260ebc98d5c3 WatchSource:0}: Error finding container 122faac1a5cd535bc286116859cbbe0120e06aaa950c3f79c2c9260ebc98d5c3: Status 404 returned error can't find the container with id 122faac1a5cd535bc286116859cbbe0120e06aaa950c3f79c2c9260ebc98d5c3 Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.127709 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-f45ch" event={"ID":"aaee1061-6c26-48aa-ba83-67ccfbfa67a6","Type":"ContainerStarted","Data":"5276c43e7fc02e62e8b63d81c1b2cb70802f8b34f27fc3b28e69a00a5d5d7fdb"} Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.155126 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x" event={"ID":"090c4f71-7e7b-4a55-a85b-a1802f9bc398","Type":"ContainerStarted","Data":"253f59fb3a5f8f2d9ae2ffbfb3bfe0978e93f45115921058970b419f8d9c26bc"} Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.155929 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7g4lh" event={"ID":"5aba1d28-543f-4f9b-a25d-ceca24889466","Type":"ContainerStarted","Data":"f7dd274e860c321a9d4e0a647d63481553f5f2234427ac33f66a3d9bc1c013a0"} Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.159311 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" event={"ID":"2f899c69-b027-4354-a239-cac0f4f33164","Type":"ContainerStarted","Data":"8347e5a413fe56baeae8a580a299ee6d4a1691282bfda7bf398b8371840c2edc"} Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.160096 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n" event={"ID":"31b2aea6-ff5a-4742-8821-b51e2075eea6","Type":"ContainerStarted","Data":"b3153e174e74677345c24dddc3a9e3584c01ba5265d76715ca3350cff42d687f"} Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.161345 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" event={"ID":"0a163af1-9710-4335-ada6-08578cb822b0","Type":"ContainerStarted","Data":"8fa341beaa3c9d6e45b8cf057b168f146472129874a6886fc7564db65e5d4e60"} Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.163566 4954 patch_prober.go:28] interesting pod/console-operator-58897d9998-kb4kp container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/readyz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.163595 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-kb4kp" podUID="9f85396f-69ce-4da8-8b25-2ca4517ff116" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/readyz\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.178986 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.190340 4954 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl"] Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.214298 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:32 crc kubenswrapper[4954]: E1128 16:12:32.215781 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:32.715770476 +0000 UTC m=+106.107439017 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.219881 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc"] Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.299888 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fvflb"] Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.310799 4954 patch_prober.go:28] interesting pod/router-default-5444994796-5zg2k container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.310850 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5zg2k" podUID="2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.317251 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:32 crc kubenswrapper[4954]: E1128 16:12:32.317561 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:32.817511435 +0000 UTC m=+106.209179976 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.317642 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:32 crc kubenswrapper[4954]: E1128 16:12:32.319788 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:32.819780919 +0000 UTC m=+106.211449460 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.336394 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-qngll" podStartSLOduration=82.336374117 podStartE2EDuration="1m22.336374117s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:32.335173699 +0000 UTC m=+105.726842240" watchObservedRunningTime="2025-11-28 16:12:32.336374117 +0000 UTC m=+105.728042658" Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.400766 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-wbnjk"] Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.422113 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:32 crc kubenswrapper[4954]: E1128 16:12:32.422394 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:32.922381447 +0000 UTC m=+106.314049988 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.425658 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr"] Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.523442 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:32 crc kubenswrapper[4954]: E1128 16:12:32.524053 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:33.024019842 +0000 UTC m=+106.415688383 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.624990 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:32 crc kubenswrapper[4954]: E1128 16:12:32.625181 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:33.125165673 +0000 UTC m=+106.516834214 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.625260 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:32 crc kubenswrapper[4954]: E1128 16:12:32.625516 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:33.125510044 +0000 UTC m=+106.517178585 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.701514 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" podStartSLOduration=82.701498239 podStartE2EDuration="1m22.701498239s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:32.697088316 +0000 UTC m=+106.088756857" watchObservedRunningTime="2025-11-28 16:12:32.701498239 +0000 UTC m=+106.093166780" Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.726742 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:32 crc kubenswrapper[4954]: E1128 16:12:32.726920 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:33.226896172 +0000 UTC m=+106.618564713 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.726962 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:32 crc kubenswrapper[4954]: E1128 16:12:32.727457 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:33.22745017 +0000 UTC m=+106.619118711 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:32 crc kubenswrapper[4954]: W1128 16:12:32.732054 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e51474f_c520_42db_ad78_42f143642a7e.slice/crio-8f1b0c8737f9df20c129657d99f33de4b4c3b1cf230e64d15be5a44102cf74c3 WatchSource:0}: Error finding container 8f1b0c8737f9df20c129657d99f33de4b4c3b1cf230e64d15be5a44102cf74c3: Status 404 returned error can't find the container with id 8f1b0c8737f9df20c129657d99f33de4b4c3b1cf230e64d15be5a44102cf74c3 Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.750380 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-5zg2k" podStartSLOduration=81.750365594 podStartE2EDuration="1m21.750365594s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:32.749497425 +0000 UTC m=+106.141165966" watchObservedRunningTime="2025-11-28 16:12:32.750365594 +0000 UTC m=+106.142034125" Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.754635 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp"] Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.755877 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-6xcl5"] Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.760931 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-9wjhp"] Nov 28 16:12:32 crc kubenswrapper[4954]: W1128 16:12:32.762022 4954 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78fe2f29_be96_4c11_939b_775929f004f3.slice/crio-3907e57cd3138b922fc83302b1c9bb5a3f7e894039a8c05f344a49bf1ec9ddd0 WatchSource:0}: Error finding container 3907e57cd3138b922fc83302b1c9bb5a3f7e894039a8c05f344a49bf1ec9ddd0: Status 404 returned error can't find the container with id 3907e57cd3138b922fc83302b1c9bb5a3f7e894039a8c05f344a49bf1ec9ddd0 Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.770189 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq"] Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.780289 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-sdtpv"] Nov 28 16:12:32 crc kubenswrapper[4954]: W1128 16:12:32.793344 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode8c0d166_30c8_48da_8393_d03c99c1bddd.slice/crio-e5f8626a814fcf4c39c5f961894dd93e19ab169a69ea17c4000c43714a72dd6e WatchSource:0}: Error finding container e5f8626a814fcf4c39c5f961894dd93e19ab169a69ea17c4000c43714a72dd6e: Status 404 returned error can't find the container with id e5f8626a814fcf4c39c5f961894dd93e19ab169a69ea17c4000c43714a72dd6e Nov 28 16:12:32 crc kubenswrapper[4954]: W1128 16:12:32.794563 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4fa1e392_8e18_4d9a_b369_214965705443.slice/crio-28ca8947dfccb083d32427e8d02cf74ec743cf5d5f45d6855f63ca9791c183a7 WatchSource:0}: Error finding container 28ca8947dfccb083d32427e8d02cf74ec743cf5d5f45d6855f63ca9791c183a7: Status 404 returned error can't find the container with id 28ca8947dfccb083d32427e8d02cf74ec743cf5d5f45d6855f63ca9791c183a7 Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.831069 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:32 crc kubenswrapper[4954]: E1128 16:12:32.831420 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:33.331389631 +0000 UTC m=+106.723058172 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.842092 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" podStartSLOduration=82.842076038 podStartE2EDuration="1m22.842076038s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:32.836151465 +0000 UTC m=+106.227819996" watchObservedRunningTime="2025-11-28 16:12:32.842076038 +0000 UTC m=+106.233744579"
Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.909255 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-sr684" podStartSLOduration=81.909236366 podStartE2EDuration="1m21.909236366s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:32.867215323 +0000 UTC m=+106.258883864" watchObservedRunningTime="2025-11-28 16:12:32.909236366 +0000 UTC m=+106.300904907"
Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.909348 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-kb4kp" podStartSLOduration=82.909344569 podStartE2EDuration="1m22.909344569s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:32.904976537 +0000 UTC m=+106.296645078" watchObservedRunningTime="2025-11-28 16:12:32.909344569 +0000 UTC m=+106.301013110"
Nov 28 16:12:32 crc kubenswrapper[4954]: I1128 16:12:32.932677 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:32 crc kubenswrapper[4954]: E1128 16:12:32.932957 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:33.432945094 +0000 UTC m=+106.824613635 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.034052 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.034243 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:33.534211138 +0000 UTC m=+106.925879689 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.034372 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.034710 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:33.534698434 +0000 UTC m=+106.926366975 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.135734 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.135868 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:33.635845875 +0000 UTC m=+107.027514426 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.136071 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.136419 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:33.636408303 +0000 UTC m=+107.028076844 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.167334 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" event={"ID":"0e65a9be-8ef8-400b-935d-9ec8a2f51d4a","Type":"ContainerStarted","Data":"200e90b9b11ba473e2da5bbe926039b313c1dfe033bc7bfa58ae5827e45ab809"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.168487 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" event={"ID":"bd239891-15f6-4d39-bec1-10bb3318c2ee","Type":"ContainerStarted","Data":"bea3fbedd4541a1de252c71dc2a9c6a02c936c47a9a0e20858a4d883d2d3e123"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.169724 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-2wdfl" event={"ID":"e6444de3-0e80-40f8-812a-19620725657c","Type":"ContainerStarted","Data":"487da2b37dec18bfd17dcaa3922cf44be552f658575effc3cea2b3be372b3220"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.170738 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9ck6" event={"ID":"8f76f36e-152f-4386-bb8b-79a98d8bbfa6","Type":"ContainerStarted","Data":"319ac6b9780ceae2dc0705afcddb7c3cf89796ff90ea0068376d4ca60a78244b"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.172057 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" event={"ID":"9e51474f-c520-42db-ad78-42f143642a7e","Type":"ContainerStarted","Data":"8f1b0c8737f9df20c129657d99f33de4b4c3b1cf230e64d15be5a44102cf74c3"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.173509 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" event={"ID":"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba","Type":"ContainerStarted","Data":"c1e4eed5b3ad0f22b06ecb6b9513d188a9898eb56273d8ce7a6c68584655990a"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.174777 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr" event={"ID":"41770d46-8567-4b2f-b782-910f08b0b373","Type":"ContainerStarted","Data":"025c6c9b6e93599ff5a9d265a26ee2ce6ffc4f1c4e1a0e1d68a810bba203442f"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.176128 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c4ms2" event={"ID":"283afc15-304c-45e8-b9c6-7975cb98c34a","Type":"ContainerStarted","Data":"2e3c744be337fb85254d6ee95fc3c8ae078f247799c823bd2b31e8d52a23e597"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.176887 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-sdtpv" event={"ID":"160c98bc-d26b-4d97-af06-9d0cfbba17a3","Type":"ContainerStarted","Data":"d00e832d1daf6fbbfc9e6547e5c82a8a58469939085dc751dac6334c21968537"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.177747 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-6xcl5" event={"ID":"e8c0d166-30c8-48da-8393-d03c99c1bddd","Type":"ContainerStarted","Data":"e5f8626a814fcf4c39c5f961894dd93e19ab169a69ea17c4000c43714a72dd6e"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.178666 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-58fhh" event={"ID":"4807ba73-41fb-433c-8097-3fa9ab5b1495","Type":"ContainerStarted","Data":"0128f8fa8f1f9909f45b97855c2433e339f4898d7a58c4967092cf72f85322e4"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.180556 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-zmmsk" event={"ID":"1c7fb7be-980b-491c-87a1-4495b97fc8e7","Type":"ContainerStarted","Data":"d914f64e53b03ef7dcd7b8777f9b1554dd52647a3ca083e88dd93009ff3ce59b"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.181402 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-9wjhp" event={"ID":"9d51e5f1-a57d-4c66-b8b3-c29c966af005","Type":"ContainerStarted","Data":"4f7d343daaa8d672c4306af24aeadfd4a58a0d0c596d1de83d0217edffe756ab"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.182714 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" event={"ID":"db28b581-1553-4c72-a11f-fb4c3d67a33f","Type":"ContainerStarted","Data":"8e2ee78f80be5f739a13736ea894733f637c253ff89974f55cfb180b3da3ce0b"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.183800 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" event={"ID":"1f4e9b0c-5e6d-4838-8af9-1921e28f4c68","Type":"ContainerStarted","Data":"202f6da89dc50d9705d2cbe042344365d92790da4d80d58fe8e7b7aac10689b7"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.184911 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp" event={"ID":"4fa1e392-8e18-4d9a-b369-214965705443","Type":"ContainerStarted","Data":"28ca8947dfccb083d32427e8d02cf74ec743cf5d5f45d6855f63ca9791c183a7"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.185932 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq" event={"ID":"d7adf89c-ec5d-478c-b3eb-842a5994aada","Type":"ContainerStarted","Data":"b910dfd21e31ad2c45f9dfa164c23949b4d345da058ebe5e7cb99763e7a399c5"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.187009 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc" event={"ID":"59de1205-9501-4117-a463-b51fbe6c33d1","Type":"ContainerStarted","Data":"301e42ecd0076f442dd507ef6df9d5daf88e7a61b7ce7cdc9a0c65e5d96767d8"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.188071 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" event={"ID":"2f18abd7-b65d-47d8-af3e-44008988873e","Type":"ContainerStarted","Data":"0d79e49b4d84264e0291ffda44e6cac142e498d4101289cefed8f77c792ce59b"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.189036 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" event={"ID":"78fe2f29-be96-4c11-939b-775929f004f3","Type":"ContainerStarted","Data":"3907e57cd3138b922fc83302b1c9bb5a3f7e894039a8c05f344a49bf1ec9ddd0"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.191275 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5" event={"ID":"fcd0148c-c50f-40fd-aac6-6f8226ee81ab","Type":"ContainerStarted","Data":"122faac1a5cd535bc286116859cbbe0120e06aaa950c3f79c2c9260ebc98d5c3"}
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.191810 4954 patch_prober.go:28] interesting pod/console-operator-58897d9998-kb4kp container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/readyz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.191850 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-kb4kp" podUID="9f85396f-69ce-4da8-8b25-2ca4517ff116" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/readyz\": dial tcp 10.217.0.10:8443: connect: connection refused"
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.237623 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.237949 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:33.737923515 +0000 UTC m=+107.129592056 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.238120 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.238574 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:33.738555405 +0000 UTC m=+107.130223946 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.316086 4954 patch_prober.go:28] interesting pod/router-default-5444994796-5zg2k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 16:12:33 crc kubenswrapper[4954]: [-]has-synced failed: reason withheld
Nov 28 16:12:33 crc kubenswrapper[4954]: [+]process-running ok
Nov 28 16:12:33 crc kubenswrapper[4954]: healthz check failed
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.316146 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5zg2k" podUID="2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.339158 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.340289 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:33.840274354 +0000 UTC m=+107.231942895 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.440716 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.440986 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:33.94097502 +0000 UTC m=+107.332643561 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.541738 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.541954 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:34.041930264 +0000 UTC m=+107.433598805 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.542126 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.542494 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:34.042480592 +0000 UTC m=+107.434149133 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.643379 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.643582 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:34.143556169 +0000 UTC m=+107.535224710 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.643678 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.643936 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:34.143925712 +0000 UTC m=+107.535594253 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.744833 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.745071 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:34.245040431 +0000 UTC m=+107.636708982 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.745204 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.745562 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:34.245545977 +0000 UTC m=+107.637214528 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.821012 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-hvvf8"
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.821134 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-hvvf8"
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.846787 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.846966 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:34.346940176 +0000 UTC m=+107.738608727 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.847152 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.847514 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:34.347499094 +0000 UTC m=+107.739167715 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.948518 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.948693 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:34.448663794 +0000 UTC m=+107.840332345 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:33 crc kubenswrapper[4954]: I1128 16:12:33.948958 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:33 crc kubenswrapper[4954]: E1128 16:12:33.949335 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:34.449316356 +0000 UTC m=+107.840984917 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.050047 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:34 crc kubenswrapper[4954]: E1128 16:12:34.050631 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:34.550611501 +0000 UTC m=+107.942280052 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.153178 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:34 crc kubenswrapper[4954]: E1128 16:12:34.153488 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:34.653477236 +0000 UTC m=+108.045145777 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.196942 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-26cld" event={"ID":"99fb892f-33e1-4ec2-9a56-60461486f2ab","Type":"ContainerStarted","Data":"91a68d22fc91cad1c27c8388191c19a942a40a37f84c10e13e2b60f2276d4fa1"}
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.198970 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m" event={"ID":"09c13a85-d025-45f4-b81d-35ee15d4dc50","Type":"ContainerStarted","Data":"3974debb41088528948307a2db93957e849c6e774f90fb2ca75a49b34d67b516"}
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.254223 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:34 crc kubenswrapper[4954]: E1128 16:12:34.256121 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:34.756098265 +0000 UTC m=+108.147766886 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.312447 4954 patch_prober.go:28] interesting pod/router-default-5444994796-5zg2k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 16:12:34 crc kubenswrapper[4954]: [-]has-synced failed: reason withheld
Nov 28 16:12:34 crc kubenswrapper[4954]: [+]process-running ok
Nov 28 16:12:34 crc kubenswrapper[4954]: healthz check failed
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.312516 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5zg2k" podUID="2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.356939 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:34 crc kubenswrapper[4954]: E1128 16:12:34.357320 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:34.857304358 +0000 UTC m=+108.248972889 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.457943 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:34 crc kubenswrapper[4954]: E1128 16:12:34.458409 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:34.958374844 +0000 UTC m=+108.350043385 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.559847 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:34 crc kubenswrapper[4954]: E1128 16:12:34.560202 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.060187077 +0000 UTC m=+108.451855628 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.661377 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:34 crc kubenswrapper[4954]: E1128 16:12:34.661672 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.161637877 +0000 UTC m=+108.553306458 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.661994 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:34 crc kubenswrapper[4954]: E1128 16:12:34.662401 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.162381611 +0000 UTC m=+108.554050192 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.762866 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:34 crc kubenswrapper[4954]: E1128 16:12:34.763085 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.263058406 +0000 UTC m=+108.654726977 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.763212 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:34 crc kubenswrapper[4954]: E1128 16:12:34.763709 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.263692436 +0000 UTC m=+108.655361017 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.864658 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:34 crc kubenswrapper[4954]: E1128 16:12:34.864915 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.364858367 +0000 UTC m=+108.756526938 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.864976 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:34 crc kubenswrapper[4954]: E1128 16:12:34.865441 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.365426666 +0000 UTC m=+108.757095217 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.966654 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:34 crc kubenswrapper[4954]: E1128 16:12:34.966970 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.466919477 +0000 UTC m=+108.858588058 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:34 crc kubenswrapper[4954]: I1128 16:12:34.967015 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:34 crc kubenswrapper[4954]: E1128 16:12:34.967600 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.467577549 +0000 UTC m=+108.859246130 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.068459 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.068631 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.568604714 +0000 UTC m=+108.960273275 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.068706 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.069049 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.569039649 +0000 UTC m=+108.960708210 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.170173 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.170496 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.670472108 +0000 UTC m=+109.062140689 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.170645 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.171028 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.671006386 +0000 UTC m=+109.062674957 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.203928 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" event={"ID":"529ebb9e-62c6-4708-a8dd-bbe4092d04fb","Type":"ContainerStarted","Data":"f0c733b0a5e1fff8f551fc80b212bcc9f4b8befd44caf0d6db41abfab090e975"}
Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.220822 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-txr4x" podStartSLOduration=85.22080062 podStartE2EDuration="1m25.22080062s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:35.218799266 +0000 UTC m=+108.610467837" watchObservedRunningTime="2025-11-28 16:12:35.22080062 +0000 UTC m=+108.612469191"
Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.272137 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.272368 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.772334962 +0000 UTC m=+109.164003543 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.273171 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.273858 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.77383523 +0000 UTC m=+109.165503811 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.313517 4954 patch_prober.go:28] interesting pod/router-default-5444994796-5zg2k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 16:12:35 crc kubenswrapper[4954]: [-]has-synced failed: reason withheld
Nov 28 16:12:35 crc kubenswrapper[4954]: [+]process-running ok
Nov 28 16:12:35 crc kubenswrapper[4954]: healthz check failed
Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.313625 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5zg2k" podUID="2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.374964 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.375182 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.875161407 +0000 UTC m=+109.266829958 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.375323 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.375681 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.875670873 +0000 UTC m=+109.267339434 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.476600 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.476824 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.976795973 +0000 UTC m=+109.368464524 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.476874 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.477464 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:35.977453204 +0000 UTC m=+109.369121745 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.578439 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.578725 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.078686207 +0000 UTC m=+109.470354758 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.578792 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.579210 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.079199204 +0000 UTC m=+109.470867755 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.680155 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.680475 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.180431637 +0000 UTC m=+109.572100228 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.781204 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.781554 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.281540546 +0000 UTC m=+109.673209087 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.882214 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.882441 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.382409767 +0000 UTC m=+109.774078338 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.882683 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.883167 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.3831516 +0000 UTC m=+109.774820171 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.965573 4954 patch_prober.go:28] interesting pod/apiserver-76f77b778f-hvvf8 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 28 16:12:35 crc kubenswrapper[4954]: [+]log ok Nov 28 16:12:35 crc kubenswrapper[4954]: [+]etcd ok Nov 28 16:12:35 crc kubenswrapper[4954]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 28 16:12:35 crc kubenswrapper[4954]: [+]poststarthook/generic-apiserver-start-informers ok Nov 28 16:12:35 crc kubenswrapper[4954]: [+]poststarthook/max-in-flight-filter ok Nov 28 16:12:35 crc kubenswrapper[4954]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 28 16:12:35 crc kubenswrapper[4954]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 28 16:12:35 crc kubenswrapper[4954]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Nov 28 16:12:35 crc kubenswrapper[4954]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Nov 28 16:12:35 crc kubenswrapper[4954]: [+]poststarthook/project.openshift.io-projectcache ok Nov 28 16:12:35 crc kubenswrapper[4954]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 28 16:12:35 crc kubenswrapper[4954]: [+]poststarthook/openshift.io-startinformers ok Nov 28 16:12:35 crc kubenswrapper[4954]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 28 16:12:35 crc kubenswrapper[4954]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 28 16:12:35 crc kubenswrapper[4954]: livez check failed Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.965632 4954 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-apiserver/apiserver-76f77b778f-hvvf8" podUID="39da966e-8c22-4165-81e3-be4df8c3e0d8" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.983641 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.983810 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.483779885 +0000 UTC m=+109.875448436 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:35 crc kubenswrapper[4954]: I1128 16:12:35.984053 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:35 crc kubenswrapper[4954]: E1128 16:12:35.984441 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.484426985 +0000 UTC m=+109.876095526 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.085597 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:36 crc kubenswrapper[4954]: E1128 16:12:36.085832 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.585772862 +0000 UTC m=+109.977441413 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.085920 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:36 crc kubenswrapper[4954]: E1128 16:12:36.086283 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.586271588 +0000 UTC m=+109.977940199 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.187452 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:36 crc kubenswrapper[4954]: E1128 16:12:36.187697 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.687670947 +0000 UTC m=+110.079339488 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.187885 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:36 crc kubenswrapper[4954]: E1128 16:12:36.188126 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.688115641 +0000 UTC m=+110.079784182 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.217619 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-f45ch" event={"ID":"aaee1061-6c26-48aa-ba83-67ccfbfa67a6","Type":"ContainerStarted","Data":"19e5e0161d98a3d739f00dab4b6ee5447e8d7eced3659fe39976358a68217ebb"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.233509 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" event={"ID":"78fe2f29-be96-4c11-939b-775929f004f3","Type":"ContainerStarted","Data":"510cccfa81295c24b54ac4b69262498cf4c51c0f3c11ba58b4fdef780357f572"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.257368 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7g4lh" event={"ID":"5aba1d28-543f-4f9b-a25d-ceca24889466","Type":"ContainerStarted","Data":"600d218c70d88be603910c730a37b471fe9d67b87b4a6425da286b5770221056"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.264493 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" event={"ID":"bd239891-15f6-4d39-bec1-10bb3318c2ee","Type":"ContainerStarted","Data":"f6ebdf099316216b669c57c084b33f3e8d09fc19cd8e28f7accf3cd0393616f8"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.266111 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" event={"ID":"1f4e9b0c-5e6d-4838-8af9-1921e28f4c68","Type":"ContainerStarted","Data":"f4ba2f25dd2effcbd2ab16b51d7095aba8d39dea46ddc48357261a623e122644"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.287346 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr" event={"ID":"41770d46-8567-4b2f-b782-910f08b0b373","Type":"ContainerStarted","Data":"8c73cea903963e6312c2c596d514212917c9dc7122f451a063fb9f13c0164345"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.288547 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:36 crc kubenswrapper[4954]: E1128 16:12:36.288804 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.788789156 +0000 UTC m=+110.180457697 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.299828 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-kw5b6" event={"ID":"ecfd366b-1ead-4b0a-9591-1d69197fe1a3","Type":"ContainerStarted","Data":"2bdb438f0a400a8d3593401dfb355444ad3eb99ef2849a67f081a3ff0e2d5278"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.316490 4954 patch_prober.go:28] interesting pod/router-default-5444994796-5zg2k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 16:12:36 crc kubenswrapper[4954]: [-]has-synced failed: reason withheld Nov 28 16:12:36 crc kubenswrapper[4954]: [+]process-running ok Nov 28 16:12:36 crc kubenswrapper[4954]: healthz check failed Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.316566 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5zg2k" podUID="2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.318112 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc" event={"ID":"59de1205-9501-4117-a463-b51fbe6c33d1","Type":"ContainerStarted","Data":"94ac4a2701c67c102e0eb09f928570db335507ba684ac8c061758831abc02249"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.319029 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" event={"ID":"0a163af1-9710-4335-ada6-08578cb822b0","Type":"ContainerStarted","Data":"a404f1ce7f5d827f2a70d221bdcab80a300fe286d35bbd40a3d0d4439c9bc834"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.321581 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n" event={"ID":"31b2aea6-ff5a-4742-8821-b51e2075eea6","Type":"ContainerStarted","Data":"2435e35567e9acd1c9018a059753b6c6777c96ebe12ed2bbe7e19cb2315afe23"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.327560 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-58fhh" event={"ID":"4807ba73-41fb-433c-8097-3fa9ab5b1495","Type":"ContainerStarted","Data":"9aadf606d88692492f84e49d442e50da32bc66507055cc2905665ccc913b9dcc"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.329024 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p" event={"ID":"093dcb08-56c5-475b-b31d-00451f608c85","Type":"ContainerStarted","Data":"257c240806aba9968ad2338b6c3ba76d5aadddb98248d662431ee433f45172fe"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.334414 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-zmmsk" event={"ID":"1c7fb7be-980b-491c-87a1-4495b97fc8e7","Type":"ContainerStarted","Data":"69e35977a9f7b31f4ace6652a9960ff5fd31b05584aa5d942a0d2066b3121086"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.339397 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" event={"ID":"6b62bff3-6957-454f-94bc-7a0579e32b2c","Type":"ContainerStarted","Data":"5310eb1ab24c480eb3bc13c8065ff72e312b0320a49633543a12a87dfb04b78c"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.345212 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-b564n" podStartSLOduration=85.345194895 podStartE2EDuration="1m25.345194895s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:36.343137288 +0000 UTC m=+109.734805829" watchObservedRunningTime="2025-11-28 16:12:36.345194895 +0000 UTC m=+109.736863436" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.355911 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9ck6" event={"ID":"8f76f36e-152f-4386-bb8b-79a98d8bbfa6","Type":"ContainerStarted","Data":"d27c397239ef5bce00729fddb82747284222f8eab5eb4ad7811514d9287ecf36"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.363818 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" podStartSLOduration=85.363804109 podStartE2EDuration="1m25.363804109s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:36.362548578 +0000 UTC m=+109.754217119" watchObservedRunningTime="2025-11-28 16:12:36.363804109 +0000 UTC m=+109.755472650" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.367470 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" event={"ID":"0e65a9be-8ef8-400b-935d-9ec8a2f51d4a","Type":"ContainerStarted","Data":"e57c9104f3d9ca45b7a2c26c43c9cdc3ea0059e52c02c93f3562dd6843e16989"} Nov 28 16:12:36 crc 
kubenswrapper[4954]: I1128 16:12:36.380596 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp" event={"ID":"4fa1e392-8e18-4d9a-b369-214965705443","Type":"ContainerStarted","Data":"f5f24c3f1adac386e2bc860d1da78ad1e242308933bc1e7404be84203fce2274"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.382267 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" event={"ID":"86f49176-9115-4335-b2b9-2b37af823eca","Type":"ContainerStarted","Data":"5aae80c107a8d37d4359d883a09305c345c5a248383e5493571ac8d4ff706e69"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.389544 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:36 crc kubenswrapper[4954]: E1128 16:12:36.391003 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.89099018 +0000 UTC m=+110.282658721 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.398419 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-2wdfl" event={"ID":"e6444de3-0e80-40f8-812a-19620725657c","Type":"ContainerStarted","Data":"9e211a006b2397f016f0cedffa8ec510ce2f85d832ca00c04675d220aba6562a"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.412575 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-xgj9p" podStartSLOduration=85.412559459 podStartE2EDuration="1m25.412559459s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:36.389007886 +0000 UTC m=+109.780676427" watchObservedRunningTime="2025-11-28 16:12:36.412559459 +0000 UTC m=+109.804228000" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.419352 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" event={"ID":"9e51474f-c520-42db-ad78-42f143642a7e","Type":"ContainerStarted","Data":"563952950c0ef2198124863da9eaa76245ba156772af525f32bf5e0e96f2a1ad"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.421170 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.431823 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" event={"ID":"2f899c69-b027-4354-a239-cac0f4f33164","Type":"ContainerStarted","Data":"d31e8ea211a9936619ac2ee429662603a9805122e7dfec5af4c26198930c7cbf"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.433109 4954 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-fvflb container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body= Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.433175 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" podUID="9e51474f-c520-42db-ad78-42f143642a7e" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.449673 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-c9ck6" podStartSLOduration=85.449649323 podStartE2EDuration="1m25.449649323s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:36.413923784 +0000 UTC m=+109.805592325" watchObservedRunningTime="2025-11-28 16:12:36.449649323 +0000 UTC m=+109.841317864" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.460723 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5" event={"ID":"fcd0148c-c50f-40fd-aac6-6f8226ee81ab","Type":"ContainerStarted","Data":"6b9d048b2acf4c4d8144759d20c465840d3dacce9e685b13ab85f5107d7e678d"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.461547 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.465108 4954 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-pn7n5 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.465152 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5" podUID="fcd0148c-c50f-40fd-aac6-6f8226ee81ab" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.466680 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-9wjhp" event={"ID":"9d51e5f1-a57d-4c66-b8b3-c29c966af005","Type":"ContainerStarted","Data":"96b66b034182e8cae520b4b7b265bb226ad917aa3f8c17380dfbb208c2f2c94c"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.484280 4954 generic.go:334] "Generic (PLEG): container finished" podID="529ebb9e-62c6-4708-a8dd-bbe4092d04fb" containerID="f0c733b0a5e1fff8f551fc80b212bcc9f4b8befd44caf0d6db41abfab090e975" exitCode=0 Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.484392 4954 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" event={"ID":"529ebb9e-62c6-4708-a8dd-bbe4092d04fb","Type":"ContainerDied","Data":"f0c733b0a5e1fff8f551fc80b212bcc9f4b8befd44caf0d6db41abfab090e975"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.489557 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" podStartSLOduration=85.489537936 podStartE2EDuration="1m25.489537936s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:36.456446813 +0000 UTC m=+109.848115354" watchObservedRunningTime="2025-11-28 16:12:36.489537936 +0000 UTC m=+109.881206477" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.490792 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5" podStartSLOduration=85.490785986 podStartE2EDuration="1m25.490785986s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:36.488193482 +0000 UTC m=+109.879862033" watchObservedRunningTime="2025-11-28 16:12:36.490785986 +0000 UTC m=+109.882454527" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.491895 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:36 crc kubenswrapper[4954]: E1128 16:12:36.492205 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.992189502 +0000 UTC m=+110.383858043 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.492360 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:36 crc kubenswrapper[4954]: E1128 16:12:36.493210 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:36.993196075 +0000 UTC m=+110.384864616 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.513302 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" event={"ID":"2f18abd7-b65d-47d8-af3e-44008988873e","Type":"ContainerStarted","Data":"dd92ee1cad6c2ab85fcebd8218d98749c796a250198d22fb99d55a004071a561"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.525273 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" event={"ID":"35c6d76c-8581-4697-bef1-dca1ba384f43","Type":"ContainerStarted","Data":"460dd60c34f0ff7f2353c16cc8c40ab6b0d77d5f85f7f7a50ac1ef09c6381cd3"} Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.526123 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.526714 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.567656 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-48l9m" podStartSLOduration=86.567639799 podStartE2EDuration="1m26.567639799s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:36.562328097 +0000 UTC m=+109.953996648" watchObservedRunningTime="2025-11-28 16:12:36.567639799 +0000 UTC m=+109.959308340" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.593463 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:36 crc kubenswrapper[4954]: E1128 16:12:36.593688 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.093667734 +0000 UTC m=+110.485336275 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.594182 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:36 crc kubenswrapper[4954]: E1128 16:12:36.597483 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.097470826 +0000 UTC m=+110.489139477 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.658787 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" podStartSLOduration=85.658768184 podStartE2EDuration="1m25.658768184s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:36.59540535 +0000 UTC m=+109.987073891" watchObservedRunningTime="2025-11-28 16:12:36.658768184 +0000 UTC m=+110.050436725" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.697812 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:36 crc kubenswrapper[4954]: E1128 16:12:36.698190 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.198176743 +0000 UTC m=+110.589845284 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.799552 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:36 crc kubenswrapper[4954]: E1128 16:12:36.799904 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.299889942 +0000 UTC m=+110.691558483 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.900193 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.900502 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:36 crc kubenswrapper[4954]: E1128 16:12:36.900603 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.400587657 +0000 UTC m=+110.792256198 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.900662 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:36 crc kubenswrapper[4954]: E1128 16:12:36.900945 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.400937759 +0000 UTC m=+110.792606300 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.928410 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" podStartSLOduration=86.928393659 podStartE2EDuration="1m26.928393659s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:36.660840192 +0000 UTC m=+110.052508733" watchObservedRunningTime="2025-11-28 16:12:36.928393659 +0000 UTC m=+110.320062200" Nov 28 16:12:36 crc kubenswrapper[4954]: I1128 16:12:36.951621 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.001249 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:37 crc kubenswrapper[4954]: E1128 16:12:37.001408 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.501384446 +0000 UTC m=+110.893052997 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.001715 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:37 crc kubenswrapper[4954]: E1128 16:12:37.002021 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.502012807 +0000 UTC m=+110.893681348 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.102958 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:37 crc kubenswrapper[4954]: E1128 16:12:37.103121 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.603105535 +0000 UTC m=+110.994774076 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.103270 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:37 crc kubenswrapper[4954]: E1128 16:12:37.103537 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.603518449 +0000 UTC m=+110.995186990 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.216009 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:37 crc kubenswrapper[4954]: E1128 16:12:37.216303 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.716288805 +0000 UTC m=+111.107957346 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.315037 4954 patch_prober.go:28] interesting pod/router-default-5444994796-5zg2k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 16:12:37 crc kubenswrapper[4954]: [-]has-synced failed: reason withheld
Nov 28 16:12:37 crc kubenswrapper[4954]: [+]process-running ok
Nov 28 16:12:37 crc kubenswrapper[4954]: healthz check failed
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.315547 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5zg2k" podUID="2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.319185 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:37 crc kubenswrapper[4954]: E1128 16:12:37.319460 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.819447461 +0000 UTC m=+111.211116002 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.420125 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:37 crc kubenswrapper[4954]: E1128 16:12:37.420487 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:37.920468637 +0000 UTC m=+111.312137178 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.521807 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:37 crc kubenswrapper[4954]: E1128 16:12:37.522173 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.022159235 +0000 UTC m=+111.413827776 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.531398 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" event={"ID":"0e65a9be-8ef8-400b-935d-9ec8a2f51d4a","Type":"ContainerStarted","Data":"47adfe215d772b89c62f687fdd95a02c6541d7ca3c5a58ef106a3a5977c9f8fc"}
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.532709 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-sdtpv" event={"ID":"160c98bc-d26b-4d97-af06-9d0cfbba17a3","Type":"ContainerStarted","Data":"14643149153ac83cbb2fb7d77a48b8a13c0a4321ebf73c6b0fcb3df3c3122cb4"}
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.532770 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-sdtpv" event={"ID":"160c98bc-d26b-4d97-af06-9d0cfbba17a3","Type":"ContainerStarted","Data":"3642fde3b669d64b2e31a9727aa4bd408243815ac7ff3176a7ca6f263167e855"}
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.532790 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-sdtpv"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.534744 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-58fhh" event={"ID":"4807ba73-41fb-433c-8097-3fa9ab5b1495","Type":"ContainerStarted","Data":"0f19a8c6434eeb83ad3e2dc7937f7aaca97a782674dba03e8ed684e890ce8ddc"}
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.536321 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" event={"ID":"19107b9e-d11e-4360-a6c8-dfc9e96a8623","Type":"ContainerStarted","Data":"811f142f7744f9f1c9f69a1fb5c6046b7006741bc50795e9f1132151c4239c7c"}
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.537731 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr" event={"ID":"41770d46-8567-4b2f-b782-910f08b0b373","Type":"ContainerStarted","Data":"d807dc695215b0e172434c5a6e7537d014df1052fbf524965377a3d2b23a084b"}
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.537853 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.539125 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-26cld" event={"ID":"99fb892f-33e1-4ec2-9a56-60461486f2ab","Type":"ContainerStarted","Data":"db53168e097a5ef3c28450190d0a57743635892ed4477bb1af0856f2fbc63397"}
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.540716 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c4ms2" event={"ID":"283afc15-304c-45e8-b9c6-7975cb98c34a","Type":"ContainerStarted","Data":"f92ac7f19d6e393a5f2c6aa92293c02c4d7f5e65778b5952980f0d630783e57d"}
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.542653 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" event={"ID":"529ebb9e-62c6-4708-a8dd-bbe4092d04fb","Type":"ContainerStarted","Data":"54893734041a99fa4273b32dca60a9c2ff15a522a5194876f5ef0011732a5709"}
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.542762 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.544684 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" event={"ID":"86f49176-9115-4335-b2b9-2b37af823eca","Type":"ContainerStarted","Data":"422aa8ae1938938594a0fafb10387eb891adc8ddb76b1b01bd48384fba683da1"}
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.546194 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-6xcl5" event={"ID":"e8c0d166-30c8-48da-8393-d03c99c1bddd","Type":"ContainerStarted","Data":"ae5a2c774dec0a9c2f01174af465b1dbf6868dfb3cf40c4a1e9e118152cca0c4"}
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.547690 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq" event={"ID":"d7adf89c-ec5d-478c-b3eb-842a5994aada","Type":"ContainerStarted","Data":"d9eadc4c0a120278d6a5225dc8fc0f3c38aa3c44ca6cb7f5fcd3793268887719"}
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.547716 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq" event={"ID":"d7adf89c-ec5d-478c-b3eb-842a5994aada","Type":"ContainerStarted","Data":"bba2b4c04452610e835d073211ab7d2a04a182ec496ca32d06a531cdfbea82df"}
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.548778 4954 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-fvflb container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body=
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.548812 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" podUID="9e51474f-c520-42db-ad78-42f143642a7e" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.553770 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-pn7n5"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.558191 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-sdtpv" podStartSLOduration=9.558181323 podStartE2EDuration="9.558181323s" podCreationTimestamp="2025-11-28 16:12:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.556329784 +0000 UTC m=+110.947998325" watchObservedRunningTime="2025-11-28 16:12:37.558181323 +0000 UTC m=+110.949849864"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.602259 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hgqg7" podStartSLOduration=87.602244682 podStartE2EDuration="1m27.602244682s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.578966678 +0000 UTC m=+110.970635219" watchObservedRunningTime="2025-11-28 16:12:37.602244682 +0000 UTC m=+110.993913213"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.602932 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-f45ch" podStartSLOduration=86.602927254 podStartE2EDuration="1m26.602927254s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.600374002 +0000 UTC m=+110.992042543" watchObservedRunningTime="2025-11-28 16:12:37.602927254 +0000 UTC m=+110.994595795"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.625161 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:37 crc kubenswrapper[4954]: E1128 16:12:37.625567 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.125545888 +0000 UTC m=+111.517214429 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.625967 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:37 crc kubenswrapper[4954]: E1128 16:12:37.626746 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.126729207 +0000 UTC m=+111.518397808 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.662147 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-kw5b6" podStartSLOduration=87.662127724 podStartE2EDuration="1m27.662127724s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.637897539 +0000 UTC m=+111.029566080" watchObservedRunningTime="2025-11-28 16:12:37.662127724 +0000 UTC m=+111.053796265"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.662635 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-26cld" podStartSLOduration=87.662630061 podStartE2EDuration="1m27.662630061s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.659793309 +0000 UTC m=+111.051461850" watchObservedRunningTime="2025-11-28 16:12:37.662630061 +0000 UTC m=+111.054298602"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.696249 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rfwtj" podStartSLOduration=87.69621831 podStartE2EDuration="1m27.69621831s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.685982329 +0000 UTC m=+111.077650870" watchObservedRunningTime="2025-11-28 16:12:37.69621831 +0000 UTC m=+111.087886851"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.720904 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-58fhh" podStartSLOduration=86.72088559 podStartE2EDuration="1m26.72088559s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.720217488 +0000 UTC m=+111.111886029" watchObservedRunningTime="2025-11-28 16:12:37.72088559 +0000 UTC m=+111.112554131"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.732214 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:37 crc kubenswrapper[4954]: E1128 16:12:37.732722 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.232702293 +0000 UTC m=+111.624370834 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.742168 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-zmmsk" podStartSLOduration=10.742146879 podStartE2EDuration="10.742146879s" podCreationTimestamp="2025-11-28 16:12:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.741182908 +0000 UTC m=+111.132851449" watchObservedRunningTime="2025-11-28 16:12:37.742146879 +0000 UTC m=+111.133815420"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.788318 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vt9pp" podStartSLOduration=86.788300296 podStartE2EDuration="1m26.788300296s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.757836669 +0000 UTC m=+111.149505210" watchObservedRunningTime="2025-11-28 16:12:37.788300296 +0000 UTC m=+111.179968837"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.810826 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr" podStartSLOduration=86.810808066 podStartE2EDuration="1m26.810808066s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.792017087 +0000 UTC m=+111.183685628" watchObservedRunningTime="2025-11-28 16:12:37.810808066 +0000 UTC m=+111.202476617"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.833706 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:37 crc kubenswrapper[4954]: E1128 16:12:37.833998 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.333986198 +0000 UTC m=+111.725654739 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.834801 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4zkjb" podStartSLOduration=87.834782563 podStartE2EDuration="1m27.834782563s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.834120593 +0000 UTC m=+111.225789134" watchObservedRunningTime="2025-11-28 16:12:37.834782563 +0000 UTC m=+111.226451104"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.836756 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" podStartSLOduration=86.836749398 podStartE2EDuration="1m26.836749398s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.818182455 +0000 UTC m=+111.209850996" watchObservedRunningTime="2025-11-28 16:12:37.836749398 +0000 UTC m=+111.228417939"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.880769 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-npvgc" podStartSLOduration=86.880749295 podStartE2EDuration="1m26.880749295s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.880133344 +0000 UTC m=+111.271801885" watchObservedRunningTime="2025-11-28 16:12:37.880749295 +0000 UTC m=+111.272417836"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.882785 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" podStartSLOduration=87.88277402 podStartE2EDuration="1m27.88277402s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.858731591 +0000 UTC m=+111.250400132" watchObservedRunningTime="2025-11-28 16:12:37.88277402 +0000 UTC m=+111.274442571"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.906703 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-9wjhp" podStartSLOduration=10.906688726 podStartE2EDuration="10.906688726s" podCreationTimestamp="2025-11-28 16:12:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.905440825 +0000 UTC m=+111.297109366" watchObservedRunningTime="2025-11-28 16:12:37.906688726 +0000 UTC m=+111.298357267"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.939713 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-7g4lh" podStartSLOduration=87.939700596 podStartE2EDuration="1m27.939700596s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.937929029 +0000 UTC m=+111.329597570" watchObservedRunningTime="2025-11-28 16:12:37.939700596 +0000 UTC m=+111.331369137"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.939945 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:37 crc kubenswrapper[4954]: E1128 16:12:37.940282 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.440267755 +0000 UTC m=+111.831936296 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.963796 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-c4ms2" podStartSLOduration=87.963778727 podStartE2EDuration="1m27.963778727s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.960622184 +0000 UTC m=+111.352290725" watchObservedRunningTime="2025-11-28 16:12:37.963778727 +0000 UTC m=+111.355447268"
Nov 28 16:12:37 crc kubenswrapper[4954]: I1128 16:12:37.984349 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-2wdfl" podStartSLOduration=86.984333584 podStartE2EDuration="1m26.984333584s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:37.983706223 +0000 UTC m=+111.375374764" watchObservedRunningTime="2025-11-28 16:12:37.984333584 +0000 UTC m=+111.376002125"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.030075 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" podStartSLOduration=87.030054287 podStartE2EDuration="1m27.030054287s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:38.029028743 +0000 UTC m=+111.420697284" watchObservedRunningTime="2025-11-28 16:12:38.030054287 +0000 UTC m=+111.421722818"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.040964 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:38 crc kubenswrapper[4954]: E1128 16:12:38.041272 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.54126158 +0000 UTC m=+111.932930121 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.053664 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" podStartSLOduration=87.053652992 podStartE2EDuration="1m27.053652992s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:38.051514172 +0000 UTC m=+111.443182713" watchObservedRunningTime="2025-11-28 16:12:38.053652992 +0000 UTC m=+111.445321533"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.084560 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-ghjjk" podStartSLOduration=88.084544223 podStartE2EDuration="1m28.084544223s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:38.081550856 +0000 UTC m=+111.473219397" watchObservedRunningTime="2025-11-28 16:12:38.084544223 +0000 UTC m=+111.476212764"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.142989 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:38 crc kubenswrapper[4954]: E1128 16:12:38.143156 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.643131703 +0000 UTC m=+112.034800244 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.143377 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:38 crc kubenswrapper[4954]: E1128 16:12:38.143727 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.643710732 +0000 UTC m=+112.035379273 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.243939 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:38 crc kubenswrapper[4954]: E1128 16:12:38.244228 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.744215972 +0000 UTC m=+112.135884513 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.313392 4954 patch_prober.go:28] interesting pod/router-default-5444994796-5zg2k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 16:12:38 crc kubenswrapper[4954]: [-]has-synced failed: reason withheld
Nov 28 16:12:38 crc kubenswrapper[4954]: [+]process-running ok
Nov 28 16:12:38 crc kubenswrapper[4954]: healthz check failed
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.313437 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5zg2k" podUID="2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.345498 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:38 crc kubenswrapper[4954]: E1128 16:12:38.345813 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.845803027 +0000 UTC m=+112.237471568 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.446943 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:38 crc kubenswrapper[4954]: E1128 16:12:38.448333 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:38.94830495 +0000 UTC m=+112.339973481 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.548294 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:38 crc kubenswrapper[4954]: E1128 16:12:38.549228 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.049213173 +0000 UTC m=+112.440881714 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.553678 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-6xcl5" event={"ID":"e8c0d166-30c8-48da-8393-d03c99c1bddd","Type":"ContainerStarted","Data":"e2a96024cf63267cca25238bb0dec97b6cefad148a46193c535afb00feb51f24"}
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.556295 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-mrxnm" event={"ID":"19107b9e-d11e-4360-a6c8-dfc9e96a8623","Type":"ContainerStarted","Data":"4374a4e8f6ef4a00639b0bd4c15c9d522416126c956b2e3f694c507db4085017"}
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.558337 4954 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-fvflb container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body=
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.558371 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" podUID="9e51474f-c520-42db-ad78-42f143642a7e" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.595576 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-6xcl5" podStartSLOduration=87.595559246 podStartE2EDuration="1m27.595559246s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:38.593079786 +0000 UTC m=+111.984748327" watchObservedRunningTime="2025-11-28 16:12:38.595559246 +0000 UTC m=+111.987227787"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.640223 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-mrxnm" podStartSLOduration=88.640208784 podStartE2EDuration="1m28.640208784s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:38.636742982 +0000 UTC m=+112.028411523" watchObservedRunningTime="2025-11-28 16:12:38.640208784 +0000 UTC m=+112.031877325"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.640419 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-rhxlq" podStartSLOduration=87.640415861 podStartE2EDuration="1m27.640415861s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:38.613659113 +0000 UTC m=+112.005327654" watchObservedRunningTime="2025-11-28 16:12:38.640415861 +0000 UTC m=+112.032084402"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.650095 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:38 crc kubenswrapper[4954]: E1128 16:12:38.654374 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.154360013 +0000 UTC m=+112.546028554 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.752645 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:38 crc kubenswrapper[4954]: E1128 16:12:38.753044 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.253027552 +0000 UTC m=+112.644696083 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.793627 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-j85wr" podStartSLOduration=87.793613239 podStartE2EDuration="1m27.793613239s" podCreationTimestamp="2025-11-28 16:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:38.678214146 +0000 UTC m=+112.069882687" watchObservedRunningTime="2025-11-28 16:12:38.793613239 +0000 UTC m=+112.185281780"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.793932 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tw7lr"]
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.794788 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tw7lr"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.798454 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.812950 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tw7lr"]
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.831224 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-hvvf8"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.843255 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-hvvf8"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.869915 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.870085 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0563a539-f9ae-4640-a7f2-68027936fe45-utilities\") pod \"certified-operators-tw7lr\" (UID: \"0563a539-f9ae-4640-a7f2-68027936fe45\") " pod="openshift-marketplace/certified-operators-tw7lr"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.870107 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtcrj\" (UniqueName: \"kubernetes.io/projected/0563a539-f9ae-4640-a7f2-68027936fe45-kube-api-access-gtcrj\") pod \"certified-operators-tw7lr\" (UID: \"0563a539-f9ae-4640-a7f2-68027936fe45\") " pod="openshift-marketplace/certified-operators-tw7lr"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.870189 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0563a539-f9ae-4640-a7f2-68027936fe45-catalog-content\") pod \"certified-operators-tw7lr\" (UID: \"0563a539-f9ae-4640-a7f2-68027936fe45\") " pod="openshift-marketplace/certified-operators-tw7lr"
Nov 28 16:12:38 crc kubenswrapper[4954]: E1128 16:12:38.870264 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.370250665 +0000 UTC m=+112.761919206 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.902375 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.904658 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.914158 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.971058 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.971093 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0563a539-f9ae-4640-a7f2-68027936fe45-catalog-content\") pod \"certified-operators-tw7lr\" (UID: \"0563a539-f9ae-4640-a7f2-68027936fe45\") " pod="openshift-marketplace/certified-operators-tw7lr"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.971163 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0563a539-f9ae-4640-a7f2-68027936fe45-utilities\") pod \"certified-operators-tw7lr\" (UID: \"0563a539-f9ae-4640-a7f2-68027936fe45\") " pod="openshift-marketplace/certified-operators-tw7lr"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.971181 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtcrj\" (UniqueName: \"kubernetes.io/projected/0563a539-f9ae-4640-a7f2-68027936fe45-kube-api-access-gtcrj\") pod \"certified-operators-tw7lr\" (UID: \"0563a539-f9ae-4640-a7f2-68027936fe45\") " pod="openshift-marketplace/certified-operators-tw7lr"
Nov 28 16:12:38 crc kubenswrapper[4954]: E1128 16:12:38.972130 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.472119928 +0000 UTC m=+112.863788469 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.972577 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0563a539-f9ae-4640-a7f2-68027936fe45-catalog-content\") pod \"certified-operators-tw7lr\" (UID: \"0563a539-f9ae-4640-a7f2-68027936fe45\") " pod="openshift-marketplace/certified-operators-tw7lr"
Nov 28 16:12:38 crc kubenswrapper[4954]: I1128 16:12:38.973452 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0563a539-f9ae-4640-a7f2-68027936fe45-utilities\") pod \"certified-operators-tw7lr\" (UID: \"0563a539-f9ae-4640-a7f2-68027936fe45\") " pod="openshift-marketplace/certified-operators-tw7lr"
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.005032 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-f5sl2"]
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.007325 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f5sl2"
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.020155 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.041791 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtcrj\" (UniqueName: \"kubernetes.io/projected/0563a539-f9ae-4640-a7f2-68027936fe45-kube-api-access-gtcrj\") pod \"certified-operators-tw7lr\" (UID: \"0563a539-f9ae-4640-a7f2-68027936fe45\") " pod="openshift-marketplace/certified-operators-tw7lr"
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.045162 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f5sl2"]
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.072321 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:39 crc kubenswrapper[4954]: E1128 16:12:39.072469 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.572448141 +0000 UTC m=+112.964116682 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.072611 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:39 crc kubenswrapper[4954]: E1128 16:12:39.072989 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.572970829 +0000 UTC m=+112.964639370 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.126875 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tw7lr"
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.173377 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:39 crc kubenswrapper[4954]: E1128 16:12:39.173542 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.673506519 +0000 UTC m=+113.065175060 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.173898 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt"
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.173942 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/500e43fe-7466-45bd-ab6c-9f357da02385-utilities\") pod \"community-operators-f5sl2\" (UID: \"500e43fe-7466-45bd-ab6c-9f357da02385\") " pod="openshift-marketplace/community-operators-f5sl2"
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.174016 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdknf\" (UniqueName: \"kubernetes.io/projected/500e43fe-7466-45bd-ab6c-9f357da02385-kube-api-access-gdknf\") pod \"community-operators-f5sl2\" (UID: \"500e43fe-7466-45bd-ab6c-9f357da02385\") " pod="openshift-marketplace/community-operators-f5sl2"
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.174226 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/500e43fe-7466-45bd-ab6c-9f357da02385-catalog-content\") pod \"community-operators-f5sl2\" (UID: \"500e43fe-7466-45bd-ab6c-9f357da02385\") " pod="openshift-marketplace/community-operators-f5sl2"
Nov 28 16:12:39 crc kubenswrapper[4954]: E1128 16:12:39.174392 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.674373757 +0000 UTC m=+113.066042298 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.206889 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5lwj2"]
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.207810 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5lwj2"
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.224047 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5lwj2"]
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.274942 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 16:12:39 crc kubenswrapper[4954]: E1128 16:12:39.275132 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.775090194 +0000 UTC m=+113.166758735 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.275187 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/500e43fe-7466-45bd-ab6c-9f357da02385-utilities\") pod \"community-operators-f5sl2\" (UID: \"500e43fe-7466-45bd-ab6c-9f357da02385\") " pod="openshift-marketplace/community-operators-f5sl2"
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.275289 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdknf\" (UniqueName: \"kubernetes.io/projected/500e43fe-7466-45bd-ab6c-9f357da02385-kube-api-access-gdknf\") pod \"community-operators-f5sl2\" (UID: \"500e43fe-7466-45bd-ab6c-9f357da02385\") " pod="openshift-marketplace/community-operators-f5sl2"
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.275618 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/500e43fe-7466-45bd-ab6c-9f357da02385-utilities\") pod \"community-operators-f5sl2\" (UID: \"500e43fe-7466-45bd-ab6c-9f357da02385\") " pod="openshift-marketplace/community-operators-f5sl2"
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.275689 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/500e43fe-7466-45bd-ab6c-9f357da02385-catalog-content\") pod \"community-operators-f5sl2\" (UID: \"500e43fe-7466-45bd-ab6c-9f357da02385\") " pod="openshift-marketplace/community-operators-f5sl2"
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.275987 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/500e43fe-7466-45bd-ab6c-9f357da02385-catalog-content\") pod \"community-operators-f5sl2\" (UID: \"500e43fe-7466-45bd-ab6c-9f357da02385\") " pod="openshift-marketplace/community-operators-f5sl2"
Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.329378 4954 patch_prober.go:28] interesting
pod/router-default-5444994796-5zg2k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 16:12:39 crc kubenswrapper[4954]: [-]has-synced failed: reason withheld Nov 28 16:12:39 crc kubenswrapper[4954]: [+]process-running ok Nov 28 16:12:39 crc kubenswrapper[4954]: healthz check failed Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.329431 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5zg2k" podUID="2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.339460 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdknf\" (UniqueName: \"kubernetes.io/projected/500e43fe-7466-45bd-ab6c-9f357da02385-kube-api-access-gdknf\") pod \"community-operators-f5sl2\" (UID: \"500e43fe-7466-45bd-ab6c-9f357da02385\") " pod="openshift-marketplace/community-operators-f5sl2" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.358595 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f5sl2" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.386734 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.386952 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-catalog-content\") pod \"certified-operators-5lwj2\" (UID: \"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6\") " pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.387004 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ql8h\" (UniqueName: \"kubernetes.io/projected/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-kube-api-access-6ql8h\") pod \"certified-operators-5lwj2\" (UID: \"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6\") " pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.387026 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-utilities\") pod \"certified-operators-5lwj2\" (UID: \"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6\") " pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:12:39 crc kubenswrapper[4954]: E1128 16:12:39.387354 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.887337854 +0000 UTC m=+113.279006395 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.468545 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-l49q6"] Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.469973 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.470904 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l49q6"] Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.488048 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.488384 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-catalog-content\") pod \"certified-operators-5lwj2\" (UID: \"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6\") " pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:12:39 crc kubenswrapper[4954]: E1128 16:12:39.488480 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.988451483 +0000 UTC m=+113.380120024 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.488579 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ql8h\" (UniqueName: \"kubernetes.io/projected/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-kube-api-access-6ql8h\") pod \"certified-operators-5lwj2\" (UID: \"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6\") " pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.488606 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-utilities\") pod \"certified-operators-5lwj2\" (UID: \"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6\") " pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.488671 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.488807 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-catalog-content\") pod \"certified-operators-5lwj2\" (UID: \"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6\") " pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:12:39 crc kubenswrapper[4954]: E1128 16:12:39.489015 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:39.98900675 +0000 UTC m=+113.380675291 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.489280 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-utilities\") pod \"certified-operators-5lwj2\" (UID: \"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6\") " pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.528495 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ql8h\" (UniqueName: \"kubernetes.io/projected/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-kube-api-access-6ql8h\") pod \"certified-operators-5lwj2\" (UID: \"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6\") " pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.574831 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.591103 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.591267 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgsls\" (UniqueName: \"kubernetes.io/projected/a6c3823f-faef-4429-b6a0-a9c8678b7358-kube-api-access-xgsls\") pod \"community-operators-l49q6\" (UID: \"a6c3823f-faef-4429-b6a0-a9c8678b7358\") " pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.591289 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6c3823f-faef-4429-b6a0-a9c8678b7358-catalog-content\") pod \"community-operators-l49q6\" (UID: \"a6c3823f-faef-4429-b6a0-a9c8678b7358\") " pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.591348 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6c3823f-faef-4429-b6a0-a9c8678b7358-utilities\") pod \"community-operators-l49q6\" (UID: \"a6c3823f-faef-4429-b6a0-a9c8678b7358\") " pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:12:39 crc kubenswrapper[4954]: E1128 16:12:39.592005 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.091989041 +0000 UTC m=+113.483657582 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.622777 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" event={"ID":"78fe2f29-be96-4c11-939b-775929f004f3","Type":"ContainerStarted","Data":"15bd5e66ef17f1ff9e6caa62ef0d0b834ba0368d87c0586d7c1d25f937a64307"} Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.622823 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" event={"ID":"78fe2f29-be96-4c11-939b-775929f004f3","Type":"ContainerStarted","Data":"235fae3b1b2d6ec6fa9181056398447b350dc860030cdd8eabe18f6747ed64ab"} Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.646429 4954 generic.go:334] "Generic (PLEG): container finished" podID="2f18abd7-b65d-47d8-af3e-44008988873e" containerID="dd92ee1cad6c2ab85fcebd8218d98749c796a250198d22fb99d55a004071a561" exitCode=0 Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.647724 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" event={"ID":"2f18abd7-b65d-47d8-af3e-44008988873e","Type":"ContainerDied","Data":"dd92ee1cad6c2ab85fcebd8218d98749c796a250198d22fb99d55a004071a561"} Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.666837 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wlzmv" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.692809 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.692856 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgsls\" (UniqueName: \"kubernetes.io/projected/a6c3823f-faef-4429-b6a0-a9c8678b7358-kube-api-access-xgsls\") pod \"community-operators-l49q6\" (UID: \"a6c3823f-faef-4429-b6a0-a9c8678b7358\") " pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.692875 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6c3823f-faef-4429-b6a0-a9c8678b7358-catalog-content\") pod \"community-operators-l49q6\" (UID: \"a6c3823f-faef-4429-b6a0-a9c8678b7358\") " pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.692933 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6c3823f-faef-4429-b6a0-a9c8678b7358-utilities\") pod \"community-operators-l49q6\" (UID: \"a6c3823f-faef-4429-b6a0-a9c8678b7358\") " pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:12:39 crc 
kubenswrapper[4954]: I1128 16:12:39.693274 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6c3823f-faef-4429-b6a0-a9c8678b7358-utilities\") pod \"community-operators-l49q6\" (UID: \"a6c3823f-faef-4429-b6a0-a9c8678b7358\") " pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:12:39 crc kubenswrapper[4954]: E1128 16:12:39.693510 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.193500942 +0000 UTC m=+113.585169483 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.693971 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6c3823f-faef-4429-b6a0-a9c8678b7358-catalog-content\") pod \"community-operators-l49q6\" (UID: \"a6c3823f-faef-4429-b6a0-a9c8678b7358\") " pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.709110 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tw7lr"] Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.713264 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgsls\" (UniqueName: \"kubernetes.io/projected/a6c3823f-faef-4429-b6a0-a9c8678b7358-kube-api-access-xgsls\") pod \"community-operators-l49q6\" (UID: \"a6c3823f-faef-4429-b6a0-a9c8678b7358\") " pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.763450 4954 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.793705 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:39 crc kubenswrapper[4954]: E1128 16:12:39.795051 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.295034535 +0000 UTC m=+113.686703076 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.824267 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:12:39 crc kubenswrapper[4954]: I1128 16:12:39.895607 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:39 crc kubenswrapper[4954]: E1128 16:12:39.895939 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.395927707 +0000 UTC m=+113.787596248 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-w96mt" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:39.997030 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:40 crc kubenswrapper[4954]: E1128 16:12:39.998285 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 16:12:40.498270697 +0000 UTC m=+113.889939238 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.034602 4954 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-28T16:12:39.763682618Z","Handler":null,"Name":""} Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.082680 4954 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.082716 4954 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.085916 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f5sl2"] Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.098685 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.133872 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5lwj2"] Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.272232 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-kb4kp" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.275429 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l49q6"] Nov 28 16:12:40 crc kubenswrapper[4954]: W1128 16:12:40.279926 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda6c3823f_faef_4429_b6a0_a9c8678b7358.slice/crio-88392424aa746a96b2becac10029bd59780d741b637058f493b020bf3ea77eb1 WatchSource:0}: Error finding container 88392424aa746a96b2becac10029bd59780d741b637058f493b020bf3ea77eb1: Status 404 returned error can't find the container with id 88392424aa746a96b2becac10029bd59780d741b637058f493b020bf3ea77eb1 Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.313547 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.316975 4954 patch_prober.go:28] interesting pod/router-default-5444994796-5zg2k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 16:12:40 crc kubenswrapper[4954]: [-]has-synced failed: reason withheld Nov 28 16:12:40 crc 
kubenswrapper[4954]: [+]process-running ok Nov 28 16:12:40 crc kubenswrapper[4954]: healthz check failed Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.317034 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5zg2k" podUID="2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.395332 4954 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.395636 4954 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.425237 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-w96mt\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.502915 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.509729 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.577692 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.595978 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.596034 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.598553 4954 patch_prober.go:28] interesting pod/console-f9d7485db-kw5b6 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.26:8443/health\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body= Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.598593 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-kw5b6" podUID="ecfd366b-1ead-4b0a-9591-1d69197fe1a3" containerName="console" probeResult="failure" output="Get \"https://10.217.0.26:8443/health\": dial tcp 10.217.0.26:8443: connect: connection refused" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.656298 4954 generic.go:334] "Generic (PLEG): container finished" podID="0563a539-f9ae-4640-a7f2-68027936fe45" containerID="b94e83accae0ecb4444afbbf529a677de315b2e218c45bd542c04676b54e1f48" exitCode=0 Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.656376 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tw7lr" event={"ID":"0563a539-f9ae-4640-a7f2-68027936fe45","Type":"ContainerDied","Data":"b94e83accae0ecb4444afbbf529a677de315b2e218c45bd542c04676b54e1f48"} Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.656410 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tw7lr" event={"ID":"0563a539-f9ae-4640-a7f2-68027936fe45","Type":"ContainerStarted","Data":"957c1030f4ee4f3a287d457e151d361c42717954991d3f8ffb9f32e52eb823b9"} Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.667262 4954 generic.go:334] "Generic (PLEG): container finished" podID="500e43fe-7466-45bd-ab6c-9f357da02385" containerID="07b6cc6da14036ef4f80c51c80e5a97e3802cd23676390baf41227a2e4f78500" exitCode=0 Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.667317 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f5sl2" event={"ID":"500e43fe-7466-45bd-ab6c-9f357da02385","Type":"ContainerDied","Data":"07b6cc6da14036ef4f80c51c80e5a97e3802cd23676390baf41227a2e4f78500"} Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.667366 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f5sl2" event={"ID":"500e43fe-7466-45bd-ab6c-9f357da02385","Type":"ContainerStarted","Data":"31163536d74c561b06ea4a2835429d892e3d211519ab6efd8dda5b1c2dbb137a"} Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.689321 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-7g4lh" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.696022 4954 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.697438 4954 patch_prober.go:28] interesting pod/downloads-7954f5f757-7g4lh container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial 
tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.697606 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7g4lh" podUID="5aba1d28-543f-4f9b-a25d-ceca24889466" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.697497 4954 patch_prober.go:28] interesting pod/downloads-7954f5f757-7g4lh container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.697888 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7g4lh" podUID="5aba1d28-543f-4f9b-a25d-ceca24889466" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.698054 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" event={"ID":"78fe2f29-be96-4c11-939b-775929f004f3","Type":"ContainerStarted","Data":"3c9d7af9014d0f73a62b2c3de2ca2fc09e4851843c59595ce0f1437d928f7b40"} Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.705563 4954 patch_prober.go:28] interesting pod/downloads-7954f5f757-7g4lh container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" start-of-body= Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.705619 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7g4lh" podUID="5aba1d28-543f-4f9b-a25d-ceca24889466" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.31:8080/\": dial tcp 10.217.0.31:8080: connect: connection refused" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.710908 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l49q6" event={"ID":"a6c3823f-faef-4429-b6a0-a9c8678b7358","Type":"ContainerStarted","Data":"9d7883087c30a27ba08a46e3b5c76754ee186b4f3469be87d846cf88673fc94c"} Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.710965 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l49q6" event={"ID":"a6c3823f-faef-4429-b6a0-a9c8678b7358","Type":"ContainerStarted","Data":"88392424aa746a96b2becac10029bd59780d741b637058f493b020bf3ea77eb1"} Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.725748 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5lwj2" event={"ID":"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6","Type":"ContainerStarted","Data":"a76f87bf415d1f16b481d6f27b1ad6ab110db92a8d5d58a0a0a04d465786efe1"} Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.725805 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5lwj2" event={"ID":"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6","Type":"ContainerStarted","Data":"77b02e4ba0114d00099df49bd7b06db2e8337dc8368c1f6a3f9d8e3c329ddf1e"} Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.751828 4954 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.760122 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-v4786" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.777077 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.789209 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-4zwww" Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.860691 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-w96mt"] Nov 28 16:12:40 crc kubenswrapper[4954]: I1128 16:12:40.998664 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8zxgp"] Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.000755 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.003861 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8zxgp"] Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.007798 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.032571 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.032994 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.116232 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f18abd7-b65d-47d8-af3e-44008988873e-config-volume\") pod \"2f18abd7-b65d-47d8-af3e-44008988873e\" (UID: \"2f18abd7-b65d-47d8-af3e-44008988873e\") " Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.116298 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f18abd7-b65d-47d8-af3e-44008988873e-secret-volume\") pod \"2f18abd7-b65d-47d8-af3e-44008988873e\" (UID: \"2f18abd7-b65d-47d8-af3e-44008988873e\") " Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.116441 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9hfc7\" (UniqueName: \"kubernetes.io/projected/2f18abd7-b65d-47d8-af3e-44008988873e-kube-api-access-9hfc7\") pod \"2f18abd7-b65d-47d8-af3e-44008988873e\" (UID: \"2f18abd7-b65d-47d8-af3e-44008988873e\") " Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.116641 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-utilities\") pod \"redhat-marketplace-8zxgp\" (UID: \"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b\") " pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.116664 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rz96t\" (UniqueName: \"kubernetes.io/projected/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-kube-api-access-rz96t\") pod \"redhat-marketplace-8zxgp\" (UID: \"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b\") " pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.116756 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-catalog-content\") pod \"redhat-marketplace-8zxgp\" (UID: \"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b\") " pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.116867 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f18abd7-b65d-47d8-af3e-44008988873e-config-volume" (OuterVolumeSpecName: "config-volume") pod "2f18abd7-b65d-47d8-af3e-44008988873e" (UID: "2f18abd7-b65d-47d8-af3e-44008988873e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.129734 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f18abd7-b65d-47d8-af3e-44008988873e-kube-api-access-9hfc7" (OuterVolumeSpecName: "kube-api-access-9hfc7") pod "2f18abd7-b65d-47d8-af3e-44008988873e" (UID: "2f18abd7-b65d-47d8-af3e-44008988873e"). InnerVolumeSpecName "kube-api-access-9hfc7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.129753 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f18abd7-b65d-47d8-af3e-44008988873e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2f18abd7-b65d-47d8-af3e-44008988873e" (UID: "2f18abd7-b65d-47d8-af3e-44008988873e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.218287 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-catalog-content\") pod \"redhat-marketplace-8zxgp\" (UID: \"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b\") " pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.218373 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-utilities\") pod \"redhat-marketplace-8zxgp\" (UID: \"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b\") " pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.218391 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rz96t\" (UniqueName: \"kubernetes.io/projected/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-kube-api-access-rz96t\") pod \"redhat-marketplace-8zxgp\" (UID: \"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b\") " pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.218441 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9hfc7\" (UniqueName: \"kubernetes.io/projected/2f18abd7-b65d-47d8-af3e-44008988873e-kube-api-access-9hfc7\") on node \"crc\" DevicePath \"\"" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.218453 4954 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f18abd7-b65d-47d8-af3e-44008988873e-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.218464 4954 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f18abd7-b65d-47d8-af3e-44008988873e-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.218863 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-catalog-content\") pod \"redhat-marketplace-8zxgp\" (UID: \"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b\") " pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.219104 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-utilities\") pod \"redhat-marketplace-8zxgp\" (UID: \"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b\") " pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.244302 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rz96t\" (UniqueName: \"kubernetes.io/projected/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-kube-api-access-rz96t\") pod \"redhat-marketplace-8zxgp\" (UID: 
\"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b\") " pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.311980 4954 patch_prober.go:28] interesting pod/router-default-5444994796-5zg2k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 16:12:41 crc kubenswrapper[4954]: [-]has-synced failed: reason withheld Nov 28 16:12:41 crc kubenswrapper[4954]: [+]process-running ok Nov 28 16:12:41 crc kubenswrapper[4954]: healthz check failed Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.312043 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5zg2k" podUID="2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.345827 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.392850 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ct8cs"] Nov 28 16:12:41 crc kubenswrapper[4954]: E1128 16:12:41.393079 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f18abd7-b65d-47d8-af3e-44008988873e" containerName="collect-profiles" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.393107 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f18abd7-b65d-47d8-af3e-44008988873e" containerName="collect-profiles" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.393205 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f18abd7-b65d-47d8-af3e-44008988873e" containerName="collect-profiles" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.393875 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.408205 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ct8cs"] Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.522670 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-catalog-content\") pod \"redhat-marketplace-ct8cs\" (UID: \"e7ab6809-c0bf-4b18-a7c8-9babc98adec1\") " pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.522712 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-utilities\") pod \"redhat-marketplace-ct8cs\" (UID: \"e7ab6809-c0bf-4b18-a7c8-9babc98adec1\") " pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.522738 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjznp\" (UniqueName: \"kubernetes.io/projected/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-kube-api-access-xjznp\") pod \"redhat-marketplace-ct8cs\" (UID: \"e7ab6809-c0bf-4b18-a7c8-9babc98adec1\") " pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.565328 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8zxgp"] Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.623582 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-catalog-content\") pod \"redhat-marketplace-ct8cs\" (UID: \"e7ab6809-c0bf-4b18-a7c8-9babc98adec1\") " pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.623733 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-utilities\") pod \"redhat-marketplace-ct8cs\" (UID: \"e7ab6809-c0bf-4b18-a7c8-9babc98adec1\") " pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.623833 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjznp\" (UniqueName: \"kubernetes.io/projected/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-kube-api-access-xjznp\") pod \"redhat-marketplace-ct8cs\" (UID: \"e7ab6809-c0bf-4b18-a7c8-9babc98adec1\") " pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.624165 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-catalog-content\") pod \"redhat-marketplace-ct8cs\" (UID: \"e7ab6809-c0bf-4b18-a7c8-9babc98adec1\") " pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.624205 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-utilities\") pod \"redhat-marketplace-ct8cs\" (UID: 
\"e7ab6809-c0bf-4b18-a7c8-9babc98adec1\") " pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.641253 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjznp\" (UniqueName: \"kubernetes.io/projected/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-kube-api-access-xjznp\") pod \"redhat-marketplace-ct8cs\" (UID: \"e7ab6809-c0bf-4b18-a7c8-9babc98adec1\") " pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.709094 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.710490 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.734412 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" event={"ID":"2f18abd7-b65d-47d8-af3e-44008988873e","Type":"ContainerDied","Data":"0d79e49b4d84264e0291ffda44e6cac142e498d4101289cefed8f77c792ce59b"} Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.734441 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.734450 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d79e49b4d84264e0291ffda44e6cac142e498d4101289cefed8f77c792ce59b" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.736548 4954 generic.go:334] "Generic (PLEG): container finished" podID="d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" containerID="a76f87bf415d1f16b481d6f27b1ad6ab110db92a8d5d58a0a0a04d465786efe1" exitCode=0 Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.736676 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5lwj2" event={"ID":"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6","Type":"ContainerDied","Data":"a76f87bf415d1f16b481d6f27b1ad6ab110db92a8d5d58a0a0a04d465786efe1"} Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.742719 4954 generic.go:334] "Generic (PLEG): container finished" podID="a6c3823f-faef-4429-b6a0-a9c8678b7358" containerID="9d7883087c30a27ba08a46e3b5c76754ee186b4f3469be87d846cf88673fc94c" exitCode=0 Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.742766 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l49q6" event={"ID":"a6c3823f-faef-4429-b6a0-a9c8678b7358","Type":"ContainerDied","Data":"9d7883087c30a27ba08a46e3b5c76754ee186b4f3469be87d846cf88673fc94c"} Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.756460 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" event={"ID":"d42a5371-0448-48d1-8df8-aee818ea6644","Type":"ContainerStarted","Data":"cef4a51e34bd09df4ec25fb5129bfacc5984f1b281439845e443316f516f16a7"} Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.756540 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" event={"ID":"d42a5371-0448-48d1-8df8-aee818ea6644","Type":"ContainerStarted","Data":"89422d44d84e86fbcc87edf1ab0f1be1e731872f2807c5f0d4e3414f5de0b48e"} Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.767790 4954 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8zxgp" event={"ID":"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b","Type":"ContainerStarted","Data":"69426fa0467cf83d07a844ef5a706048de18595c815680a8abe917baa34be7b8"} Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.888947 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.931490 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.932503 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.946020 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.946318 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 28 16:12:41 crc kubenswrapper[4954]: I1128 16:12:41.951899 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.019220 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xrnc2"] Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.032673 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.036971 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" podStartSLOduration=92.036952202 podStartE2EDuration="1m32.036952202s" podCreationTimestamp="2025-11-28 16:11:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:42.026797303 +0000 UTC m=+115.418465844" watchObservedRunningTime="2025-11-28 16:12:42.036952202 +0000 UTC m=+115.428620743" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.039219 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/187ace09-0627-4442-95b3-0e16a7a5eb93-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"187ace09-0627-4442-95b3-0e16a7a5eb93\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.039236 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.039299 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/187ace09-0627-4442-95b3-0e16a7a5eb93-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"187ace09-0627-4442-95b3-0e16a7a5eb93\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.039887 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-marketplace/redhat-operators-xrnc2"] Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.067048 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-wbnjk" podStartSLOduration=15.067033588 podStartE2EDuration="15.067033588s" podCreationTimestamp="2025-11-28 16:12:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:42.05726162 +0000 UTC m=+115.448930161" watchObservedRunningTime="2025-11-28 16:12:42.067033588 +0000 UTC m=+115.458702129" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.085254 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ct8cs"] Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.154380 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/187ace09-0627-4442-95b3-0e16a7a5eb93-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"187ace09-0627-4442-95b3-0e16a7a5eb93\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.154454 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a39712c-ebc2-4a9a-8205-537d4d00fae7-catalog-content\") pod \"redhat-operators-xrnc2\" (UID: \"0a39712c-ebc2-4a9a-8205-537d4d00fae7\") " pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.154499 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqgt2\" (UniqueName: \"kubernetes.io/projected/0a39712c-ebc2-4a9a-8205-537d4d00fae7-kube-api-access-fqgt2\") pod \"redhat-operators-xrnc2\" (UID: \"0a39712c-ebc2-4a9a-8205-537d4d00fae7\") " pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.154604 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a39712c-ebc2-4a9a-8205-537d4d00fae7-utilities\") pod \"redhat-operators-xrnc2\" (UID: \"0a39712c-ebc2-4a9a-8205-537d4d00fae7\") " pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.154632 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/187ace09-0627-4442-95b3-0e16a7a5eb93-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"187ace09-0627-4442-95b3-0e16a7a5eb93\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.154718 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/187ace09-0627-4442-95b3-0e16a7a5eb93-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"187ace09-0627-4442-95b3-0e16a7a5eb93\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.181053 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/187ace09-0627-4442-95b3-0e16a7a5eb93-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"187ace09-0627-4442-95b3-0e16a7a5eb93\") " 
pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.256305 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a39712c-ebc2-4a9a-8205-537d4d00fae7-utilities\") pod \"redhat-operators-xrnc2\" (UID: \"0a39712c-ebc2-4a9a-8205-537d4d00fae7\") " pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.256418 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a39712c-ebc2-4a9a-8205-537d4d00fae7-catalog-content\") pod \"redhat-operators-xrnc2\" (UID: \"0a39712c-ebc2-4a9a-8205-537d4d00fae7\") " pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.256445 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqgt2\" (UniqueName: \"kubernetes.io/projected/0a39712c-ebc2-4a9a-8205-537d4d00fae7-kube-api-access-fqgt2\") pod \"redhat-operators-xrnc2\" (UID: \"0a39712c-ebc2-4a9a-8205-537d4d00fae7\") " pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.256846 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a39712c-ebc2-4a9a-8205-537d4d00fae7-utilities\") pod \"redhat-operators-xrnc2\" (UID: \"0a39712c-ebc2-4a9a-8205-537d4d00fae7\") " pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.256907 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a39712c-ebc2-4a9a-8205-537d4d00fae7-catalog-content\") pod \"redhat-operators-xrnc2\" (UID: \"0a39712c-ebc2-4a9a-8205-537d4d00fae7\") " pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.276291 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqgt2\" (UniqueName: \"kubernetes.io/projected/0a39712c-ebc2-4a9a-8205-537d4d00fae7-kube-api-access-fqgt2\") pod \"redhat-operators-xrnc2\" (UID: \"0a39712c-ebc2-4a9a-8205-537d4d00fae7\") " pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.297387 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.316107 4954 patch_prober.go:28] interesting pod/router-default-5444994796-5zg2k container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 16:12:42 crc kubenswrapper[4954]: [+]has-synced ok Nov 28 16:12:42 crc kubenswrapper[4954]: [+]process-running ok Nov 28 16:12:42 crc kubenswrapper[4954]: healthz check failed Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.316167 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-5zg2k" podUID="2ed5b5e7-5b12-49fb-96b7-c52dbb8531d7" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.348475 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.398918 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zjx9w"] Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.401801 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.408062 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zjx9w"] Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.460843 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6w7r\" (UniqueName: \"kubernetes.io/projected/1e1da481-691f-4d64-8229-c8846cd6a778-kube-api-access-x6w7r\") pod \"redhat-operators-zjx9w\" (UID: \"1e1da481-691f-4d64-8229-c8846cd6a778\") " pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.460902 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e1da481-691f-4d64-8229-c8846cd6a778-utilities\") pod \"redhat-operators-zjx9w\" (UID: \"1e1da481-691f-4d64-8229-c8846cd6a778\") " pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.460923 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e1da481-691f-4d64-8229-c8846cd6a778-catalog-content\") pod \"redhat-operators-zjx9w\" (UID: \"1e1da481-691f-4d64-8229-c8846cd6a778\") " pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.564098 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6w7r\" (UniqueName: \"kubernetes.io/projected/1e1da481-691f-4d64-8229-c8846cd6a778-kube-api-access-x6w7r\") pod \"redhat-operators-zjx9w\" (UID: \"1e1da481-691f-4d64-8229-c8846cd6a778\") " pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.564156 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e1da481-691f-4d64-8229-c8846cd6a778-utilities\") pod \"redhat-operators-zjx9w\" (UID: \"1e1da481-691f-4d64-8229-c8846cd6a778\") " pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.564177 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e1da481-691f-4d64-8229-c8846cd6a778-catalog-content\") pod \"redhat-operators-zjx9w\" (UID: \"1e1da481-691f-4d64-8229-c8846cd6a778\") " pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.564648 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e1da481-691f-4d64-8229-c8846cd6a778-catalog-content\") pod \"redhat-operators-zjx9w\" (UID: \"1e1da481-691f-4d64-8229-c8846cd6a778\") " pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.564804 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/1e1da481-691f-4d64-8229-c8846cd6a778-utilities\") pod \"redhat-operators-zjx9w\" (UID: \"1e1da481-691f-4d64-8229-c8846cd6a778\") " pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.585679 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6w7r\" (UniqueName: \"kubernetes.io/projected/1e1da481-691f-4d64-8229-c8846cd6a778-kube-api-access-x6w7r\") pod \"redhat-operators-zjx9w\" (UID: \"1e1da481-691f-4d64-8229-c8846cd6a778\") " pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.595859 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-hk8pv" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.622676 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xrnc2"] Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.724789 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.736309 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 16:12:42 crc kubenswrapper[4954]: W1128 16:12:42.740397 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod187ace09_0627_4442_95b3_0e16a7a5eb93.slice/crio-aad0b3aca7a19cf1df9559752259eb2e24ec9f889680f9b12815f0a44251cb1f WatchSource:0}: Error finding container aad0b3aca7a19cf1df9559752259eb2e24ec9f889680f9b12815f0a44251cb1f: Status 404 returned error can't find the container with id aad0b3aca7a19cf1df9559752259eb2e24ec9f889680f9b12815f0a44251cb1f Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.809783 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ct8cs" event={"ID":"e7ab6809-c0bf-4b18-a7c8-9babc98adec1","Type":"ContainerStarted","Data":"e70de9f0cba66dfbf16ee743342c8cfed72916e09eedbe79a6aec81e4107c769"} Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.838441 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"187ace09-0627-4442-95b3-0e16a7a5eb93","Type":"ContainerStarted","Data":"aad0b3aca7a19cf1df9559752259eb2e24ec9f889680f9b12815f0a44251cb1f"} Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.842857 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xrnc2" event={"ID":"0a39712c-ebc2-4a9a-8205-537d4d00fae7","Type":"ContainerStarted","Data":"a44ef0acaffca54a08b8152d1d389fabeb17b106a2b5b7d88d8b4df627eb0ef8"} Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.865156 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8zxgp" event={"ID":"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b","Type":"ContainerStarted","Data":"bf87f6ba78fe1e9365a55580524b05bd5819d176b94f820ee9ac33562e9c2c53"} Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.865302 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:12:42 crc kubenswrapper[4954]: I1128 16:12:42.967149 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zjx9w"] Nov 28 16:12:42 crc 
kubenswrapper[4954]: W1128 16:12:42.992997 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1e1da481_691f_4d64_8229_c8846cd6a778.slice/crio-643f161019ecceee86fc83812fff68c0bef9b8941ca07b7189ec877706aa4268 WatchSource:0}: Error finding container 643f161019ecceee86fc83812fff68c0bef9b8941ca07b7189ec877706aa4268: Status 404 returned error can't find the container with id 643f161019ecceee86fc83812fff68c0bef9b8941ca07b7189ec877706aa4268 Nov 28 16:12:43 crc kubenswrapper[4954]: I1128 16:12:43.311507 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:43 crc kubenswrapper[4954]: I1128 16:12:43.315992 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-5zg2k" Nov 28 16:12:43 crc kubenswrapper[4954]: I1128 16:12:43.869232 4954 generic.go:334] "Generic (PLEG): container finished" podID="0a39712c-ebc2-4a9a-8205-537d4d00fae7" containerID="f33f19aefbce290573a2fffd553b55b21eed7852b1acb2a408bc2d5920dde18d" exitCode=0 Nov 28 16:12:43 crc kubenswrapper[4954]: I1128 16:12:43.869286 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xrnc2" event={"ID":"0a39712c-ebc2-4a9a-8205-537d4d00fae7","Type":"ContainerDied","Data":"f33f19aefbce290573a2fffd553b55b21eed7852b1acb2a408bc2d5920dde18d"} Nov 28 16:12:43 crc kubenswrapper[4954]: I1128 16:12:43.871271 4954 generic.go:334] "Generic (PLEG): container finished" podID="4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" containerID="bf87f6ba78fe1e9365a55580524b05bd5819d176b94f820ee9ac33562e9c2c53" exitCode=0 Nov 28 16:12:43 crc kubenswrapper[4954]: I1128 16:12:43.871321 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8zxgp" event={"ID":"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b","Type":"ContainerDied","Data":"bf87f6ba78fe1e9365a55580524b05bd5819d176b94f820ee9ac33562e9c2c53"} Nov 28 16:12:43 crc kubenswrapper[4954]: I1128 16:12:43.872719 4954 generic.go:334] "Generic (PLEG): container finished" podID="1e1da481-691f-4d64-8229-c8846cd6a778" containerID="a9047fec4c7dbc09be5959d7fe64d645904028c7a63e0971d260bb088618c586" exitCode=0 Nov 28 16:12:43 crc kubenswrapper[4954]: I1128 16:12:43.872917 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zjx9w" event={"ID":"1e1da481-691f-4d64-8229-c8846cd6a778","Type":"ContainerDied","Data":"a9047fec4c7dbc09be5959d7fe64d645904028c7a63e0971d260bb088618c586"} Nov 28 16:12:43 crc kubenswrapper[4954]: I1128 16:12:43.872957 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zjx9w" event={"ID":"1e1da481-691f-4d64-8229-c8846cd6a778","Type":"ContainerStarted","Data":"643f161019ecceee86fc83812fff68c0bef9b8941ca07b7189ec877706aa4268"} Nov 28 16:12:43 crc kubenswrapper[4954]: I1128 16:12:43.878832 4954 generic.go:334] "Generic (PLEG): container finished" podID="e7ab6809-c0bf-4b18-a7c8-9babc98adec1" containerID="20dffeeb4ab7a48399ac640962fcdcc2e09a5c42a6f687d4559485df5c7dc954" exitCode=0 Nov 28 16:12:43 crc kubenswrapper[4954]: I1128 16:12:43.878908 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ct8cs" event={"ID":"e7ab6809-c0bf-4b18-a7c8-9babc98adec1","Type":"ContainerDied","Data":"20dffeeb4ab7a48399ac640962fcdcc2e09a5c42a6f687d4559485df5c7dc954"} Nov 28 16:12:43 crc 
kubenswrapper[4954]: I1128 16:12:43.882788 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"187ace09-0627-4442-95b3-0e16a7a5eb93","Type":"ContainerStarted","Data":"e68b6eac6ad3875a9e7d16be38bc99334eb1640ef1ce8932713269a5210b6743"} Nov 28 16:12:43 crc kubenswrapper[4954]: I1128 16:12:43.968952 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.968933267 podStartE2EDuration="2.968933267s" podCreationTimestamp="2025-11-28 16:12:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:43.960315147 +0000 UTC m=+117.351983688" watchObservedRunningTime="2025-11-28 16:12:43.968933267 +0000 UTC m=+117.360601798" Nov 28 16:12:44 crc kubenswrapper[4954]: I1128 16:12:44.591814 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 16:12:44 crc kubenswrapper[4954]: I1128 16:12:44.593263 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:44 crc kubenswrapper[4954]: I1128 16:12:44.600578 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 28 16:12:44 crc kubenswrapper[4954]: I1128 16:12:44.601648 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 28 16:12:44 crc kubenswrapper[4954]: I1128 16:12:44.604198 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 16:12:44 crc kubenswrapper[4954]: I1128 16:12:44.756315 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7c042e57-9d00-445c-aacd-590669eb05e1-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"7c042e57-9d00-445c-aacd-590669eb05e1\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:44 crc kubenswrapper[4954]: I1128 16:12:44.756413 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7c042e57-9d00-445c-aacd-590669eb05e1-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"7c042e57-9d00-445c-aacd-590669eb05e1\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:44 crc kubenswrapper[4954]: I1128 16:12:44.857357 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7c042e57-9d00-445c-aacd-590669eb05e1-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"7c042e57-9d00-445c-aacd-590669eb05e1\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:44 crc kubenswrapper[4954]: I1128 16:12:44.857461 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7c042e57-9d00-445c-aacd-590669eb05e1-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"7c042e57-9d00-445c-aacd-590669eb05e1\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:44 crc kubenswrapper[4954]: I1128 16:12:44.857584 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: 
\"kubernetes.io/host-path/7c042e57-9d00-445c-aacd-590669eb05e1-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"7c042e57-9d00-445c-aacd-590669eb05e1\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:44 crc kubenswrapper[4954]: I1128 16:12:44.912049 4954 generic.go:334] "Generic (PLEG): container finished" podID="187ace09-0627-4442-95b3-0e16a7a5eb93" containerID="e68b6eac6ad3875a9e7d16be38bc99334eb1640ef1ce8932713269a5210b6743" exitCode=0 Nov 28 16:12:44 crc kubenswrapper[4954]: I1128 16:12:44.912126 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"187ace09-0627-4442-95b3-0e16a7a5eb93","Type":"ContainerDied","Data":"e68b6eac6ad3875a9e7d16be38bc99334eb1640ef1ce8932713269a5210b6743"} Nov 28 16:12:44 crc kubenswrapper[4954]: I1128 16:12:44.916107 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7c042e57-9d00-445c-aacd-590669eb05e1-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"7c042e57-9d00-445c-aacd-590669eb05e1\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:44 crc kubenswrapper[4954]: I1128 16:12:44.963992 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:45 crc kubenswrapper[4954]: I1128 16:12:45.221975 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 16:12:45 crc kubenswrapper[4954]: W1128 16:12:45.237903 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod7c042e57_9d00_445c_aacd_590669eb05e1.slice/crio-1633ad2ef7dd2cbfc05fc7da7e7de382d34ecb95afd477f38a5d39f88dcd981b WatchSource:0}: Error finding container 1633ad2ef7dd2cbfc05fc7da7e7de382d34ecb95afd477f38a5d39f88dcd981b: Status 404 returned error can't find the container with id 1633ad2ef7dd2cbfc05fc7da7e7de382d34ecb95afd477f38a5d39f88dcd981b Nov 28 16:12:45 crc kubenswrapper[4954]: I1128 16:12:45.941098 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"7c042e57-9d00-445c-aacd-590669eb05e1","Type":"ContainerStarted","Data":"28fa3265c36810d28fd387d18b445c36e1e3d53d82e2ef7e98f4b2b25ea33885"} Nov 28 16:12:45 crc kubenswrapper[4954]: I1128 16:12:45.941423 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"7c042e57-9d00-445c-aacd-590669eb05e1","Type":"ContainerStarted","Data":"1633ad2ef7dd2cbfc05fc7da7e7de382d34ecb95afd477f38a5d39f88dcd981b"} Nov 28 16:12:45 crc kubenswrapper[4954]: I1128 16:12:45.955475 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=1.955465081 podStartE2EDuration="1.955465081s" podCreationTimestamp="2025-11-28 16:12:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:12:45.954267132 +0000 UTC m=+119.345935673" watchObservedRunningTime="2025-11-28 16:12:45.955465081 +0000 UTC m=+119.347133622" Nov 28 16:12:46 crc kubenswrapper[4954]: I1128 16:12:46.222249 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-sdtpv" Nov 28 16:12:46 crc kubenswrapper[4954]: I1128 16:12:46.287813 4954 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 16:12:46 crc kubenswrapper[4954]: I1128 16:12:46.489817 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/187ace09-0627-4442-95b3-0e16a7a5eb93-kubelet-dir\") pod \"187ace09-0627-4442-95b3-0e16a7a5eb93\" (UID: \"187ace09-0627-4442-95b3-0e16a7a5eb93\") " Nov 28 16:12:46 crc kubenswrapper[4954]: I1128 16:12:46.489910 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/187ace09-0627-4442-95b3-0e16a7a5eb93-kube-api-access\") pod \"187ace09-0627-4442-95b3-0e16a7a5eb93\" (UID: \"187ace09-0627-4442-95b3-0e16a7a5eb93\") " Nov 28 16:12:46 crc kubenswrapper[4954]: I1128 16:12:46.489936 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/187ace09-0627-4442-95b3-0e16a7a5eb93-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "187ace09-0627-4442-95b3-0e16a7a5eb93" (UID: "187ace09-0627-4442-95b3-0e16a7a5eb93"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:12:46 crc kubenswrapper[4954]: I1128 16:12:46.490407 4954 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/187ace09-0627-4442-95b3-0e16a7a5eb93-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:12:46 crc kubenswrapper[4954]: I1128 16:12:46.501840 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/187ace09-0627-4442-95b3-0e16a7a5eb93-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "187ace09-0627-4442-95b3-0e16a7a5eb93" (UID: "187ace09-0627-4442-95b3-0e16a7a5eb93"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:12:46 crc kubenswrapper[4954]: I1128 16:12:46.591395 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/187ace09-0627-4442-95b3-0e16a7a5eb93-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 16:12:46 crc kubenswrapper[4954]: I1128 16:12:46.951080 4954 generic.go:334] "Generic (PLEG): container finished" podID="7c042e57-9d00-445c-aacd-590669eb05e1" containerID="28fa3265c36810d28fd387d18b445c36e1e3d53d82e2ef7e98f4b2b25ea33885" exitCode=0 Nov 28 16:12:46 crc kubenswrapper[4954]: I1128 16:12:46.951157 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"7c042e57-9d00-445c-aacd-590669eb05e1","Type":"ContainerDied","Data":"28fa3265c36810d28fd387d18b445c36e1e3d53d82e2ef7e98f4b2b25ea33885"} Nov 28 16:12:46 crc kubenswrapper[4954]: I1128 16:12:46.961999 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"187ace09-0627-4442-95b3-0e16a7a5eb93","Type":"ContainerDied","Data":"aad0b3aca7a19cf1df9559752259eb2e24ec9f889680f9b12815f0a44251cb1f"} Nov 28 16:12:46 crc kubenswrapper[4954]: I1128 16:12:46.962039 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aad0b3aca7a19cf1df9559752259eb2e24ec9f889680f9b12815f0a44251cb1f" Nov 28 16:12:46 crc kubenswrapper[4954]: I1128 16:12:46.962093 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 16:12:48 crc kubenswrapper[4954]: I1128 16:12:48.257823 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:48 crc kubenswrapper[4954]: I1128 16:12:48.422741 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7c042e57-9d00-445c-aacd-590669eb05e1-kube-api-access\") pod \"7c042e57-9d00-445c-aacd-590669eb05e1\" (UID: \"7c042e57-9d00-445c-aacd-590669eb05e1\") " Nov 28 16:12:48 crc kubenswrapper[4954]: I1128 16:12:48.422787 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7c042e57-9d00-445c-aacd-590669eb05e1-kubelet-dir\") pod \"7c042e57-9d00-445c-aacd-590669eb05e1\" (UID: \"7c042e57-9d00-445c-aacd-590669eb05e1\") " Nov 28 16:12:48 crc kubenswrapper[4954]: I1128 16:12:48.422947 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7c042e57-9d00-445c-aacd-590669eb05e1-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "7c042e57-9d00-445c-aacd-590669eb05e1" (UID: "7c042e57-9d00-445c-aacd-590669eb05e1"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:12:48 crc kubenswrapper[4954]: I1128 16:12:48.423139 4954 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7c042e57-9d00-445c-aacd-590669eb05e1-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:12:48 crc kubenswrapper[4954]: I1128 16:12:48.428536 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c042e57-9d00-445c-aacd-590669eb05e1-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7c042e57-9d00-445c-aacd-590669eb05e1" (UID: "7c042e57-9d00-445c-aacd-590669eb05e1"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:12:48 crc kubenswrapper[4954]: I1128 16:12:48.524954 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7c042e57-9d00-445c-aacd-590669eb05e1-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 16:12:48 crc kubenswrapper[4954]: I1128 16:12:48.977619 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"7c042e57-9d00-445c-aacd-590669eb05e1","Type":"ContainerDied","Data":"1633ad2ef7dd2cbfc05fc7da7e7de382d34ecb95afd477f38a5d39f88dcd981b"} Nov 28 16:12:48 crc kubenswrapper[4954]: I1128 16:12:48.977657 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1633ad2ef7dd2cbfc05fc7da7e7de382d34ecb95afd477f38a5d39f88dcd981b" Nov 28 16:12:48 crc kubenswrapper[4954]: I1128 16:12:48.977733 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 16:12:50 crc kubenswrapper[4954]: I1128 16:12:50.599255 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:50 crc kubenswrapper[4954]: I1128 16:12:50.611763 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:12:50 crc kubenswrapper[4954]: I1128 16:12:50.698171 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-7g4lh" Nov 28 16:13:00 crc kubenswrapper[4954]: I1128 16:13:00.586688 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:13:10 crc kubenswrapper[4954]: I1128 16:13:10.820517 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7crr" Nov 28 16:13:17 crc kubenswrapper[4954]: I1128 16:13:17.786457 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 16:13:17 crc kubenswrapper[4954]: E1128 16:13:17.787067 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="187ace09-0627-4442-95b3-0e16a7a5eb93" containerName="pruner" Nov 28 16:13:17 crc kubenswrapper[4954]: I1128 16:13:17.787089 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="187ace09-0627-4442-95b3-0e16a7a5eb93" containerName="pruner" Nov 28 16:13:17 crc kubenswrapper[4954]: E1128 16:13:17.787118 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c042e57-9d00-445c-aacd-590669eb05e1" containerName="pruner" Nov 28 16:13:17 crc kubenswrapper[4954]: I1128 16:13:17.787129 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c042e57-9d00-445c-aacd-590669eb05e1" containerName="pruner" Nov 28 16:13:17 crc kubenswrapper[4954]: I1128 16:13:17.787277 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="187ace09-0627-4442-95b3-0e16a7a5eb93" containerName="pruner" Nov 28 16:13:17 crc kubenswrapper[4954]: I1128 16:13:17.787300 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c042e57-9d00-445c-aacd-590669eb05e1" containerName="pruner" Nov 28 16:13:17 crc kubenswrapper[4954]: I1128 16:13:17.787712 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:17 crc kubenswrapper[4954]: I1128 16:13:17.790074 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 28 16:13:17 crc kubenswrapper[4954]: I1128 16:13:17.790987 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 28 16:13:17 crc kubenswrapper[4954]: I1128 16:13:17.805802 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 16:13:17 crc kubenswrapper[4954]: I1128 16:13:17.932688 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bab66e76-c25e-4abb-8513-df4f18c99e90-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"bab66e76-c25e-4abb-8513-df4f18c99e90\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:17 crc kubenswrapper[4954]: I1128 16:13:17.933033 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bab66e76-c25e-4abb-8513-df4f18c99e90-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"bab66e76-c25e-4abb-8513-df4f18c99e90\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.034568 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bab66e76-c25e-4abb-8513-df4f18c99e90-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"bab66e76-c25e-4abb-8513-df4f18c99e90\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.034675 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bab66e76-c25e-4abb-8513-df4f18c99e90-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"bab66e76-c25e-4abb-8513-df4f18c99e90\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.034773 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bab66e76-c25e-4abb-8513-df4f18c99e90-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"bab66e76-c25e-4abb-8513-df4f18c99e90\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.068665 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bab66e76-c25e-4abb-8513-df4f18c99e90-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"bab66e76-c25e-4abb-8513-df4f18c99e90\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.132633 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.441444 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.441573 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.443805 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.444374 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.460177 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.543140 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.543286 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.546132 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.556142 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.569816 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.569938 4954 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.575292 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:13:18 crc kubenswrapper[4954]: I1128 16:13:18.586859 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 16:13:22 crc kubenswrapper[4954]: I1128 16:13:22.190590 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 16:13:22 crc kubenswrapper[4954]: I1128 16:13:22.192341 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:22 crc kubenswrapper[4954]: I1128 16:13:22.200566 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 16:13:22 crc kubenswrapper[4954]: I1128 16:13:22.299376 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/988f7288-0247-4444-8d30-71cfa932aa33-kubelet-dir\") pod \"installer-9-crc\" (UID: \"988f7288-0247-4444-8d30-71cfa932aa33\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:22 crc kubenswrapper[4954]: I1128 16:13:22.299461 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/988f7288-0247-4444-8d30-71cfa932aa33-kube-api-access\") pod \"installer-9-crc\" (UID: \"988f7288-0247-4444-8d30-71cfa932aa33\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:22 crc kubenswrapper[4954]: I1128 16:13:22.299568 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/988f7288-0247-4444-8d30-71cfa932aa33-var-lock\") pod \"installer-9-crc\" (UID: \"988f7288-0247-4444-8d30-71cfa932aa33\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:22 crc kubenswrapper[4954]: I1128 16:13:22.400934 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/988f7288-0247-4444-8d30-71cfa932aa33-var-lock\") pod \"installer-9-crc\" (UID: \"988f7288-0247-4444-8d30-71cfa932aa33\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:22 crc kubenswrapper[4954]: I1128 16:13:22.401117 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/988f7288-0247-4444-8d30-71cfa932aa33-var-lock\") pod \"installer-9-crc\" (UID: \"988f7288-0247-4444-8d30-71cfa932aa33\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:22 crc kubenswrapper[4954]: I1128 16:13:22.401556 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/988f7288-0247-4444-8d30-71cfa932aa33-kubelet-dir\") pod \"installer-9-crc\" (UID: \"988f7288-0247-4444-8d30-71cfa932aa33\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:22 
crc kubenswrapper[4954]: I1128 16:13:22.401750 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/988f7288-0247-4444-8d30-71cfa932aa33-kube-api-access\") pod \"installer-9-crc\" (UID: \"988f7288-0247-4444-8d30-71cfa932aa33\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:22 crc kubenswrapper[4954]: I1128 16:13:22.401575 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/988f7288-0247-4444-8d30-71cfa932aa33-kubelet-dir\") pod \"installer-9-crc\" (UID: \"988f7288-0247-4444-8d30-71cfa932aa33\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:22 crc kubenswrapper[4954]: I1128 16:13:22.433230 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/988f7288-0247-4444-8d30-71cfa932aa33-kube-api-access\") pod \"installer-9-crc\" (UID: \"988f7288-0247-4444-8d30-71cfa932aa33\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:22 crc kubenswrapper[4954]: I1128 16:13:22.525555 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 16:13:25 crc kubenswrapper[4954]: I1128 16:13:25.395372 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:13:25 crc kubenswrapper[4954]: I1128 16:13:25.580253 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 16:13:32 crc kubenswrapper[4954]: I1128 16:13:32.481154 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:13:32 crc kubenswrapper[4954]: I1128 16:13:32.481746 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:13:48 crc kubenswrapper[4954]: E1128 16:13:48.433839 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 16:13:48 crc kubenswrapper[4954]: E1128 16:13:48.434614 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6ql8h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-5lwj2_openshift-marketplace(d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 16:13:48 crc kubenswrapper[4954]: E1128 16:13:48.435887 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-5lwj2" podUID="d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" Nov 28 16:13:49 crc kubenswrapper[4954]: E1128 16:13:49.818644 4954 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-5lwj2" podUID="d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" Nov 28 16:13:50 crc kubenswrapper[4954]: E1128 16:13:50.095231 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 16:13:50 crc kubenswrapper[4954]: E1128 16:13:50.095818 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gdknf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-f5sl2_openshift-marketplace(500e43fe-7466-45bd-ab6c-9f357da02385): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 16:13:50 crc kubenswrapper[4954]: E1128 16:13:50.097189 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-f5sl2" podUID="500e43fe-7466-45bd-ab6c-9f357da02385" Nov 28 16:13:51 crc kubenswrapper[4954]: E1128 16:13:51.060860 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 16:13:51 crc kubenswrapper[4954]: E1128 16:13:51.061016 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs 
--catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rz96t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-8zxgp_openshift-marketplace(4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 16:13:51 crc kubenswrapper[4954]: E1128 16:13:51.062221 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-8zxgp" podUID="4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" Nov 28 16:13:51 crc kubenswrapper[4954]: E1128 16:13:51.156518 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 16:13:51 crc kubenswrapper[4954]: E1128 16:13:51.156669 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xgsls,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-l49q6_openshift-marketplace(a6c3823f-faef-4429-b6a0-a9c8678b7358): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 16:13:51 crc kubenswrapper[4954]: E1128 16:13:51.157863 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-l49q6" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" Nov 28 16:13:51 crc kubenswrapper[4954]: E1128 16:13:51.240125 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 16:13:51 crc kubenswrapper[4954]: E1128 16:13:51.240274 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xjznp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-ct8cs_openshift-marketplace(e7ab6809-c0bf-4b18-a7c8-9babc98adec1): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 16:13:51 crc kubenswrapper[4954]: E1128 16:13:51.241588 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-ct8cs" podUID="e7ab6809-c0bf-4b18-a7c8-9babc98adec1" Nov 28 16:13:51 crc kubenswrapper[4954]: E1128 16:13:51.407659 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 16:13:51 crc kubenswrapper[4954]: E1128 16:13:51.407854 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gtcrj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-tw7lr_openshift-marketplace(0563a539-f9ae-4640-a7f2-68027936fe45): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 16:13:51 crc kubenswrapper[4954]: E1128 16:13:51.409780 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-tw7lr" podUID="0563a539-f9ae-4640-a7f2-68027936fe45" Nov 28 16:13:54 crc kubenswrapper[4954]: E1128 16:13:54.271388 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-ct8cs" podUID="e7ab6809-c0bf-4b18-a7c8-9babc98adec1" Nov 28 16:13:54 crc kubenswrapper[4954]: E1128 16:13:54.272134 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-l49q6" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" Nov 28 16:13:54 crc kubenswrapper[4954]: E1128 16:13:54.272190 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-8zxgp" podUID="4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" Nov 28 16:13:54 crc kubenswrapper[4954]: E1128 16:13:54.272265 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-tw7lr" podUID="0563a539-f9ae-4640-a7f2-68027936fe45" Nov 28 
16:13:54 crc kubenswrapper[4954]: E1128 16:13:54.319746 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 28 16:13:54 crc kubenswrapper[4954]: E1128 16:13:54.319899 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fqgt2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-xrnc2_openshift-marketplace(0a39712c-ebc2-4a9a-8205-537d4d00fae7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 16:13:54 crc kubenswrapper[4954]: E1128 16:13:54.321545 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-xrnc2" podUID="0a39712c-ebc2-4a9a-8205-537d4d00fae7" Nov 28 16:13:54 crc kubenswrapper[4954]: E1128 16:13:54.410829 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-xrnc2" podUID="0a39712c-ebc2-4a9a-8205-537d4d00fae7" Nov 28 16:13:54 crc kubenswrapper[4954]: E1128 16:13:54.529670 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 28 16:13:54 crc kubenswrapper[4954]: E1128 16:13:54.529820 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x6w7r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-zjx9w_openshift-marketplace(1e1da481-691f-4d64-8229-c8846cd6a778): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 16:13:54 crc kubenswrapper[4954]: E1128 16:13:54.530987 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-zjx9w" podUID="1e1da481-691f-4d64-8229-c8846cd6a778" Nov 28 16:13:54 crc kubenswrapper[4954]: I1128 16:13:54.566725 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 16:13:54 crc kubenswrapper[4954]: W1128 16:13:54.870188 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-f5e7c383cb97560e6250163de9bd7612f03db4cbf6f5558a7150f0c2459f8f93 WatchSource:0}: Error finding container f5e7c383cb97560e6250163de9bd7612f03db4cbf6f5558a7150f0c2459f8f93: Status 404 returned error can't find the container with id f5e7c383cb97560e6250163de9bd7612f03db4cbf6f5558a7150f0c2459f8f93 Nov 28 16:13:54 crc kubenswrapper[4954]: I1128 16:13:54.888254 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 16:13:55 crc kubenswrapper[4954]: I1128 16:13:55.411911 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"f5e7c383cb97560e6250163de9bd7612f03db4cbf6f5558a7150f0c2459f8f93"} Nov 28 16:13:55 crc kubenswrapper[4954]: I1128 16:13:55.418114 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"988f7288-0247-4444-8d30-71cfa932aa33","Type":"ContainerStarted","Data":"d05c4f42898183cb98e7782154ded145ba74748480f7257df54973345c7826b3"} Nov 28 16:13:55 crc kubenswrapper[4954]: I1128 16:13:55.420167 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"7cf557628aebf1c2696b2c7ae23c67eae44f87f87681a69f6ce860cc0dfd42ce"} Nov 28 16:13:55 crc kubenswrapper[4954]: I1128 16:13:55.422329 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"0dac556e45ac4f9f45c875161f23a3b280299549aaa0df0b6f7031a87a76b627"} Nov 28 16:13:55 crc kubenswrapper[4954]: I1128 16:13:55.423717 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"bab66e76-c25e-4abb-8513-df4f18c99e90","Type":"ContainerStarted","Data":"81b566be00d1cb6ab73b60a9311cac67e0dd516d2fb625e8994091b666ca9821"} Nov 28 16:13:55 crc kubenswrapper[4954]: E1128 16:13:55.425332 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-zjx9w" podUID="1e1da481-691f-4d64-8229-c8846cd6a778" Nov 28 16:13:56 crc kubenswrapper[4954]: I1128 16:13:56.430015 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"82700f858792932e3a962a3c3ba0b806e9572be8d364d456dd31ba84b283d5b6"} Nov 28 16:13:56 crc kubenswrapper[4954]: I1128 16:13:56.430679 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:13:56 crc kubenswrapper[4954]: I1128 16:13:56.431509 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"bab66e76-c25e-4abb-8513-df4f18c99e90","Type":"ContainerStarted","Data":"d6b4111330ce2a895cb706548d55acba57be8f7df770907bcbae6e11c03d1fbb"} Nov 28 16:13:56 crc kubenswrapper[4954]: I1128 16:13:56.434948 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"63ff0032e7728ec1f70d2bc828ffc9e150a0eaf2d6fd2179069d0d5b469d63a8"} Nov 28 16:13:56 crc kubenswrapper[4954]: I1128 16:13:56.438513 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"988f7288-0247-4444-8d30-71cfa932aa33","Type":"ContainerStarted","Data":"ec9d12dc409602aff05608586149050a9515912fc013d34c126ef39734fe09a1"} Nov 28 16:13:56 crc kubenswrapper[4954]: I1128 16:13:56.440470 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"77e5d94b84295851537fe8d4a3af420db5b85dc87a9f86fdfb7eed112b8a5c5e"} Nov 28 16:13:56 crc kubenswrapper[4954]: I1128 16:13:56.536122 4954 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=39.536098779 podStartE2EDuration="39.536098779s" podCreationTimestamp="2025-11-28 16:13:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:13:56.508391266 +0000 UTC m=+189.900059857" watchObservedRunningTime="2025-11-28 16:13:56.536098779 +0000 UTC m=+189.927767330" Nov 28 16:13:56 crc kubenswrapper[4954]: I1128 16:13:56.558739 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=34.558714635 podStartE2EDuration="34.558714635s" podCreationTimestamp="2025-11-28 16:13:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:13:56.556765605 +0000 UTC m=+189.948434176" watchObservedRunningTime="2025-11-28 16:13:56.558714635 +0000 UTC m=+189.950383186" Nov 28 16:13:57 crc kubenswrapper[4954]: I1128 16:13:57.447893 4954 generic.go:334] "Generic (PLEG): container finished" podID="bab66e76-c25e-4abb-8513-df4f18c99e90" containerID="d6b4111330ce2a895cb706548d55acba57be8f7df770907bcbae6e11c03d1fbb" exitCode=0 Nov 28 16:13:57 crc kubenswrapper[4954]: I1128 16:13:57.449079 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"bab66e76-c25e-4abb-8513-df4f18c99e90","Type":"ContainerDied","Data":"d6b4111330ce2a895cb706548d55acba57be8f7df770907bcbae6e11c03d1fbb"} Nov 28 16:13:58 crc kubenswrapper[4954]: I1128 16:13:58.710432 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:13:58 crc kubenswrapper[4954]: I1128 16:13:58.779013 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bab66e76-c25e-4abb-8513-df4f18c99e90-kube-api-access\") pod \"bab66e76-c25e-4abb-8513-df4f18c99e90\" (UID: \"bab66e76-c25e-4abb-8513-df4f18c99e90\") " Nov 28 16:13:58 crc kubenswrapper[4954]: I1128 16:13:58.779081 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bab66e76-c25e-4abb-8513-df4f18c99e90-kubelet-dir\") pod \"bab66e76-c25e-4abb-8513-df4f18c99e90\" (UID: \"bab66e76-c25e-4abb-8513-df4f18c99e90\") " Nov 28 16:13:58 crc kubenswrapper[4954]: I1128 16:13:58.779222 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bab66e76-c25e-4abb-8513-df4f18c99e90-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "bab66e76-c25e-4abb-8513-df4f18c99e90" (UID: "bab66e76-c25e-4abb-8513-df4f18c99e90"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:13:58 crc kubenswrapper[4954]: I1128 16:13:58.779483 4954 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bab66e76-c25e-4abb-8513-df4f18c99e90-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:58 crc kubenswrapper[4954]: I1128 16:13:58.785298 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bab66e76-c25e-4abb-8513-df4f18c99e90-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "bab66e76-c25e-4abb-8513-df4f18c99e90" (UID: "bab66e76-c25e-4abb-8513-df4f18c99e90"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:13:58 crc kubenswrapper[4954]: I1128 16:13:58.880986 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bab66e76-c25e-4abb-8513-df4f18c99e90-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:59 crc kubenswrapper[4954]: I1128 16:13:59.460202 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"bab66e76-c25e-4abb-8513-df4f18c99e90","Type":"ContainerDied","Data":"81b566be00d1cb6ab73b60a9311cac67e0dd516d2fb625e8994091b666ca9821"} Nov 28 16:13:59 crc kubenswrapper[4954]: I1128 16:13:59.460647 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="81b566be00d1cb6ab73b60a9311cac67e0dd516d2fb625e8994091b666ca9821" Nov 28 16:13:59 crc kubenswrapper[4954]: I1128 16:13:59.460262 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 16:14:02 crc kubenswrapper[4954]: I1128 16:14:02.480398 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:14:02 crc kubenswrapper[4954]: I1128 16:14:02.482683 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:14:06 crc kubenswrapper[4954]: I1128 16:14:06.496687 4954 generic.go:334] "Generic (PLEG): container finished" podID="d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" containerID="a5fc307537e97dab26a63d693f6099e1f843fe5ab2294293140d0419cf28ed5c" exitCode=0 Nov 28 16:14:06 crc kubenswrapper[4954]: I1128 16:14:06.496756 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5lwj2" event={"ID":"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6","Type":"ContainerDied","Data":"a5fc307537e97dab26a63d693f6099e1f843fe5ab2294293140d0419cf28ed5c"} Nov 28 16:14:06 crc kubenswrapper[4954]: I1128 16:14:06.608677 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-d6kn6"] Nov 28 16:14:08 crc kubenswrapper[4954]: I1128 16:14:08.510278 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5lwj2" 
event={"ID":"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6","Type":"ContainerStarted","Data":"909580917d6b6cc6b6d1b51161f116d8f858b54bae29ddf03ccf1e0f87a10802"} Nov 28 16:14:08 crc kubenswrapper[4954]: I1128 16:14:08.512500 4954 generic.go:334] "Generic (PLEG): container finished" podID="0563a539-f9ae-4640-a7f2-68027936fe45" containerID="9d253fa558d47073550c892d5af409def0bc5659decc52cc7f1d13241174113c" exitCode=0 Nov 28 16:14:08 crc kubenswrapper[4954]: I1128 16:14:08.512543 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tw7lr" event={"ID":"0563a539-f9ae-4640-a7f2-68027936fe45","Type":"ContainerDied","Data":"9d253fa558d47073550c892d5af409def0bc5659decc52cc7f1d13241174113c"} Nov 28 16:14:08 crc kubenswrapper[4954]: I1128 16:14:08.516354 4954 generic.go:334] "Generic (PLEG): container finished" podID="500e43fe-7466-45bd-ab6c-9f357da02385" containerID="dbf5acda2d49562fbf64c83f3441df2c864a668c93b66f0ec2f5d7ff076d4f5d" exitCode=0 Nov 28 16:14:08 crc kubenswrapper[4954]: I1128 16:14:08.516397 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f5sl2" event={"ID":"500e43fe-7466-45bd-ab6c-9f357da02385","Type":"ContainerDied","Data":"dbf5acda2d49562fbf64c83f3441df2c864a668c93b66f0ec2f5d7ff076d4f5d"} Nov 28 16:14:08 crc kubenswrapper[4954]: I1128 16:14:08.518716 4954 generic.go:334] "Generic (PLEG): container finished" podID="a6c3823f-faef-4429-b6a0-a9c8678b7358" containerID="c8ae4445850075c5a85a8157da7ff67c5a570ee911aa5ccc2bd181985839cfc5" exitCode=0 Nov 28 16:14:08 crc kubenswrapper[4954]: I1128 16:14:08.518745 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l49q6" event={"ID":"a6c3823f-faef-4429-b6a0-a9c8678b7358","Type":"ContainerDied","Data":"c8ae4445850075c5a85a8157da7ff67c5a570ee911aa5ccc2bd181985839cfc5"} Nov 28 16:14:08 crc kubenswrapper[4954]: I1128 16:14:08.549573 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5lwj2" podStartSLOduration=4.157461511 podStartE2EDuration="1m29.549557837s" podCreationTimestamp="2025-11-28 16:12:39 +0000 UTC" firstStartedPulling="2025-11-28 16:12:41.737667086 +0000 UTC m=+115.129335627" lastFinishedPulling="2025-11-28 16:14:07.129763412 +0000 UTC m=+200.521431953" observedRunningTime="2025-11-28 16:14:08.531878393 +0000 UTC m=+201.923546934" watchObservedRunningTime="2025-11-28 16:14:08.549557837 +0000 UTC m=+201.941226378" Nov 28 16:14:09 crc kubenswrapper[4954]: I1128 16:14:09.525147 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zjx9w" event={"ID":"1e1da481-691f-4d64-8229-c8846cd6a778","Type":"ContainerStarted","Data":"2f40e73ac78483ad22c7b5025f36722644fb381a64ed62ea99017ec9129a1c20"} Nov 28 16:14:09 crc kubenswrapper[4954]: I1128 16:14:09.575580 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:14:09 crc kubenswrapper[4954]: I1128 16:14:09.575614 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:14:10 crc kubenswrapper[4954]: I1128 16:14:10.234415 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:14:11 crc kubenswrapper[4954]: I1128 16:14:11.536422 4954 generic.go:334] "Generic (PLEG): container finished" 
podID="1e1da481-691f-4d64-8229-c8846cd6a778" containerID="2f40e73ac78483ad22c7b5025f36722644fb381a64ed62ea99017ec9129a1c20" exitCode=0 Nov 28 16:14:11 crc kubenswrapper[4954]: I1128 16:14:11.536504 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zjx9w" event={"ID":"1e1da481-691f-4d64-8229-c8846cd6a778","Type":"ContainerDied","Data":"2f40e73ac78483ad22c7b5025f36722644fb381a64ed62ea99017ec9129a1c20"} Nov 28 16:14:19 crc kubenswrapper[4954]: I1128 16:14:19.643239 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:14:19 crc kubenswrapper[4954]: I1128 16:14:19.696825 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5lwj2"] Nov 28 16:14:20 crc kubenswrapper[4954]: I1128 16:14:20.583768 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5lwj2" podUID="d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" containerName="registry-server" containerID="cri-o://909580917d6b6cc6b6d1b51161f116d8f858b54bae29ddf03ccf1e0f87a10802" gracePeriod=2 Nov 28 16:14:20 crc kubenswrapper[4954]: I1128 16:14:20.913780 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.053302 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-utilities\") pod \"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6\" (UID: \"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6\") " Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.053406 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ql8h\" (UniqueName: \"kubernetes.io/projected/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-kube-api-access-6ql8h\") pod \"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6\" (UID: \"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6\") " Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.053529 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-catalog-content\") pod \"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6\" (UID: \"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6\") " Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.054944 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-utilities" (OuterVolumeSpecName: "utilities") pod "d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" (UID: "d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.063407 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-kube-api-access-6ql8h" (OuterVolumeSpecName: "kube-api-access-6ql8h") pod "d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" (UID: "d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6"). InnerVolumeSpecName "kube-api-access-6ql8h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.147450 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" (UID: "d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.155307 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.155369 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ql8h\" (UniqueName: \"kubernetes.io/projected/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-kube-api-access-6ql8h\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.155400 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.589435 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8zxgp" event={"ID":"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b","Type":"ContainerStarted","Data":"0cfb9600ae329cffc5ca9ea760d07066a9d68895135acf665310609695c9638a"} Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.592638 4954 generic.go:334] "Generic (PLEG): container finished" podID="d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" containerID="909580917d6b6cc6b6d1b51161f116d8f858b54bae29ddf03ccf1e0f87a10802" exitCode=0 Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.592698 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5lwj2" event={"ID":"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6","Type":"ContainerDied","Data":"909580917d6b6cc6b6d1b51161f116d8f858b54bae29ddf03ccf1e0f87a10802"} Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.592725 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5lwj2" event={"ID":"d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6","Type":"ContainerDied","Data":"77b02e4ba0114d00099df49bd7b06db2e8337dc8368c1f6a3f9d8e3c329ddf1e"} Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.592741 4954 scope.go:117] "RemoveContainer" containerID="909580917d6b6cc6b6d1b51161f116d8f858b54bae29ddf03ccf1e0f87a10802" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.592837 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5lwj2" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.599398 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zjx9w" event={"ID":"1e1da481-691f-4d64-8229-c8846cd6a778","Type":"ContainerStarted","Data":"11f0d06cd920242e5daa274cd34dd62ceba1e157d6beefae783e6c683a337223"} Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.607364 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tw7lr" event={"ID":"0563a539-f9ae-4640-a7f2-68027936fe45","Type":"ContainerStarted","Data":"2f17e7476426a09fd2e70814d4939417f16b93cd28187737a1104367b78e0da2"} Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.611158 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f5sl2" event={"ID":"500e43fe-7466-45bd-ab6c-9f357da02385","Type":"ContainerStarted","Data":"cf5d28b0fb429b471046c78c76a6091f87a20188849b00ffd15517c8ba9c55cd"} Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.611305 4954 scope.go:117] "RemoveContainer" containerID="a5fc307537e97dab26a63d693f6099e1f843fe5ab2294293140d0419cf28ed5c" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.613202 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ct8cs" event={"ID":"e7ab6809-c0bf-4b18-a7c8-9babc98adec1","Type":"ContainerStarted","Data":"026bcb830696df0a2081e3bd94d8571f5d5e16178e4ddce960a57a9d46f39ce1"} Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.618433 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l49q6" event={"ID":"a6c3823f-faef-4429-b6a0-a9c8678b7358","Type":"ContainerStarted","Data":"496702a1bf72adfe73e6f6dccd6fa77c5583a0c6bc8f56e8d8614cc75eb69b63"} Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.627102 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xrnc2" event={"ID":"0a39712c-ebc2-4a9a-8205-537d4d00fae7","Type":"ContainerStarted","Data":"33ae1bcd43d2df59007d1d3e517790e1acff2f2847b5f9ff21f61ac6220ac915"} Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.651394 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-l49q6" podStartSLOduration=3.987534758 podStartE2EDuration="1m42.651375767s" podCreationTimestamp="2025-11-28 16:12:39 +0000 UTC" firstStartedPulling="2025-11-28 16:12:41.746201853 +0000 UTC m=+115.137870394" lastFinishedPulling="2025-11-28 16:14:20.410042852 +0000 UTC m=+213.801711403" observedRunningTime="2025-11-28 16:14:21.648432465 +0000 UTC m=+215.040101006" watchObservedRunningTime="2025-11-28 16:14:21.651375767 +0000 UTC m=+215.043044318" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.665612 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-f5sl2" podStartSLOduration=5.024118373 podStartE2EDuration="1m43.665589953s" podCreationTimestamp="2025-11-28 16:12:38 +0000 UTC" firstStartedPulling="2025-11-28 16:12:41.771670649 +0000 UTC m=+115.163339180" lastFinishedPulling="2025-11-28 16:14:20.413142179 +0000 UTC m=+213.804810760" observedRunningTime="2025-11-28 16:14:21.665007575 +0000 UTC m=+215.056676106" watchObservedRunningTime="2025-11-28 16:14:21.665589953 +0000 UTC m=+215.057258504" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.681076 4954 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zjx9w" podStartSLOduration=3.098679191 podStartE2EDuration="1m39.681059749s" podCreationTimestamp="2025-11-28 16:12:42 +0000 UTC" firstStartedPulling="2025-11-28 16:12:43.874655239 +0000 UTC m=+117.266323780" lastFinishedPulling="2025-11-28 16:14:20.457035777 +0000 UTC m=+213.848704338" observedRunningTime="2025-11-28 16:14:21.680927765 +0000 UTC m=+215.072596316" watchObservedRunningTime="2025-11-28 16:14:21.681059749 +0000 UTC m=+215.072728290" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.694385 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5lwj2"] Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.699296 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5lwj2"] Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.717705 4954 scope.go:117] "RemoveContainer" containerID="a76f87bf415d1f16b481d6f27b1ad6ab110db92a8d5d58a0a0a04d465786efe1" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.737721 4954 scope.go:117] "RemoveContainer" containerID="909580917d6b6cc6b6d1b51161f116d8f858b54bae29ddf03ccf1e0f87a10802" Nov 28 16:14:21 crc kubenswrapper[4954]: E1128 16:14:21.738057 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"909580917d6b6cc6b6d1b51161f116d8f858b54bae29ddf03ccf1e0f87a10802\": container with ID starting with 909580917d6b6cc6b6d1b51161f116d8f858b54bae29ddf03ccf1e0f87a10802 not found: ID does not exist" containerID="909580917d6b6cc6b6d1b51161f116d8f858b54bae29ddf03ccf1e0f87a10802" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.738084 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"909580917d6b6cc6b6d1b51161f116d8f858b54bae29ddf03ccf1e0f87a10802"} err="failed to get container status \"909580917d6b6cc6b6d1b51161f116d8f858b54bae29ddf03ccf1e0f87a10802\": rpc error: code = NotFound desc = could not find container \"909580917d6b6cc6b6d1b51161f116d8f858b54bae29ddf03ccf1e0f87a10802\": container with ID starting with 909580917d6b6cc6b6d1b51161f116d8f858b54bae29ddf03ccf1e0f87a10802 not found: ID does not exist" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.738164 4954 scope.go:117] "RemoveContainer" containerID="a5fc307537e97dab26a63d693f6099e1f843fe5ab2294293140d0419cf28ed5c" Nov 28 16:14:21 crc kubenswrapper[4954]: E1128 16:14:21.738506 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5fc307537e97dab26a63d693f6099e1f843fe5ab2294293140d0419cf28ed5c\": container with ID starting with a5fc307537e97dab26a63d693f6099e1f843fe5ab2294293140d0419cf28ed5c not found: ID does not exist" containerID="a5fc307537e97dab26a63d693f6099e1f843fe5ab2294293140d0419cf28ed5c" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.738588 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5fc307537e97dab26a63d693f6099e1f843fe5ab2294293140d0419cf28ed5c"} err="failed to get container status \"a5fc307537e97dab26a63d693f6099e1f843fe5ab2294293140d0419cf28ed5c\": rpc error: code = NotFound desc = could not find container \"a5fc307537e97dab26a63d693f6099e1f843fe5ab2294293140d0419cf28ed5c\": container with ID starting with a5fc307537e97dab26a63d693f6099e1f843fe5ab2294293140d0419cf28ed5c not found: ID does not 
exist" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.738639 4954 scope.go:117] "RemoveContainer" containerID="a76f87bf415d1f16b481d6f27b1ad6ab110db92a8d5d58a0a0a04d465786efe1" Nov 28 16:14:21 crc kubenswrapper[4954]: E1128 16:14:21.738992 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a76f87bf415d1f16b481d6f27b1ad6ab110db92a8d5d58a0a0a04d465786efe1\": container with ID starting with a76f87bf415d1f16b481d6f27b1ad6ab110db92a8d5d58a0a0a04d465786efe1 not found: ID does not exist" containerID="a76f87bf415d1f16b481d6f27b1ad6ab110db92a8d5d58a0a0a04d465786efe1" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.739017 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a76f87bf415d1f16b481d6f27b1ad6ab110db92a8d5d58a0a0a04d465786efe1"} err="failed to get container status \"a76f87bf415d1f16b481d6f27b1ad6ab110db92a8d5d58a0a0a04d465786efe1\": rpc error: code = NotFound desc = could not find container \"a76f87bf415d1f16b481d6f27b1ad6ab110db92a8d5d58a0a0a04d465786efe1\": container with ID starting with a76f87bf415d1f16b481d6f27b1ad6ab110db92a8d5d58a0a0a04d465786efe1 not found: ID does not exist" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.754942 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tw7lr" podStartSLOduration=3.98218635 podStartE2EDuration="1m43.754924387s" podCreationTimestamp="2025-11-28 16:12:38 +0000 UTC" firstStartedPulling="2025-11-28 16:12:40.669489955 +0000 UTC m=+114.061158496" lastFinishedPulling="2025-11-28 16:14:20.442227952 +0000 UTC m=+213.833896533" observedRunningTime="2025-11-28 16:14:21.737536032 +0000 UTC m=+215.129204573" watchObservedRunningTime="2025-11-28 16:14:21.754924387 +0000 UTC m=+215.146592928" Nov 28 16:14:21 crc kubenswrapper[4954]: I1128 16:14:21.863087 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" path="/var/lib/kubelet/pods/d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6/volumes" Nov 28 16:14:22 crc kubenswrapper[4954]: I1128 16:14:22.636693 4954 generic.go:334] "Generic (PLEG): container finished" podID="e7ab6809-c0bf-4b18-a7c8-9babc98adec1" containerID="026bcb830696df0a2081e3bd94d8571f5d5e16178e4ddce960a57a9d46f39ce1" exitCode=0 Nov 28 16:14:22 crc kubenswrapper[4954]: I1128 16:14:22.636770 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ct8cs" event={"ID":"e7ab6809-c0bf-4b18-a7c8-9babc98adec1","Type":"ContainerDied","Data":"026bcb830696df0a2081e3bd94d8571f5d5e16178e4ddce960a57a9d46f39ce1"} Nov 28 16:14:22 crc kubenswrapper[4954]: I1128 16:14:22.636828 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ct8cs" event={"ID":"e7ab6809-c0bf-4b18-a7c8-9babc98adec1","Type":"ContainerStarted","Data":"58863e7d9af0800ddd854dad3d55bcaa565d7a0a953fe9f1b3aa174439e07a15"} Nov 28 16:14:22 crc kubenswrapper[4954]: I1128 16:14:22.638821 4954 generic.go:334] "Generic (PLEG): container finished" podID="0a39712c-ebc2-4a9a-8205-537d4d00fae7" containerID="33ae1bcd43d2df59007d1d3e517790e1acff2f2847b5f9ff21f61ac6220ac915" exitCode=0 Nov 28 16:14:22 crc kubenswrapper[4954]: I1128 16:14:22.638889 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xrnc2" 
event={"ID":"0a39712c-ebc2-4a9a-8205-537d4d00fae7","Type":"ContainerDied","Data":"33ae1bcd43d2df59007d1d3e517790e1acff2f2847b5f9ff21f61ac6220ac915"} Nov 28 16:14:22 crc kubenswrapper[4954]: I1128 16:14:22.640829 4954 generic.go:334] "Generic (PLEG): container finished" podID="4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" containerID="0cfb9600ae329cffc5ca9ea760d07066a9d68895135acf665310609695c9638a" exitCode=0 Nov 28 16:14:22 crc kubenswrapper[4954]: I1128 16:14:22.640874 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8zxgp" event={"ID":"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b","Type":"ContainerDied","Data":"0cfb9600ae329cffc5ca9ea760d07066a9d68895135acf665310609695c9638a"} Nov 28 16:14:22 crc kubenswrapper[4954]: I1128 16:14:22.663918 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ct8cs" podStartSLOduration=3.070913214 podStartE2EDuration="1m41.663898782s" podCreationTimestamp="2025-11-28 16:12:41 +0000 UTC" firstStartedPulling="2025-11-28 16:12:43.880712326 +0000 UTC m=+117.272380867" lastFinishedPulling="2025-11-28 16:14:22.473697894 +0000 UTC m=+215.865366435" observedRunningTime="2025-11-28 16:14:22.661241699 +0000 UTC m=+216.052910260" watchObservedRunningTime="2025-11-28 16:14:22.663898782 +0000 UTC m=+216.055567333" Nov 28 16:14:22 crc kubenswrapper[4954]: I1128 16:14:22.724929 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:14:22 crc kubenswrapper[4954]: I1128 16:14:22.725065 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:14:23 crc kubenswrapper[4954]: I1128 16:14:23.648296 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8zxgp" event={"ID":"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b","Type":"ContainerStarted","Data":"705912eab99cb974234a13b7ba102b5dcede89f77455a8de889a249188a8d40c"} Nov 28 16:14:23 crc kubenswrapper[4954]: I1128 16:14:23.667474 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8zxgp" podStartSLOduration=4.110732116 podStartE2EDuration="1m43.667449515s" podCreationTimestamp="2025-11-28 16:12:40 +0000 UTC" firstStartedPulling="2025-11-28 16:12:43.872368516 +0000 UTC m=+117.264037057" lastFinishedPulling="2025-11-28 16:14:23.429085905 +0000 UTC m=+216.820754456" observedRunningTime="2025-11-28 16:14:23.665256387 +0000 UTC m=+217.056924928" watchObservedRunningTime="2025-11-28 16:14:23.667449515 +0000 UTC m=+217.059118086" Nov 28 16:14:23 crc kubenswrapper[4954]: I1128 16:14:23.759233 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zjx9w" podUID="1e1da481-691f-4d64-8229-c8846cd6a778" containerName="registry-server" probeResult="failure" output=< Nov 28 16:14:23 crc kubenswrapper[4954]: timeout: failed to connect service ":50051" within 1s Nov 28 16:14:23 crc kubenswrapper[4954]: > Nov 28 16:14:24 crc kubenswrapper[4954]: I1128 16:14:24.655730 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xrnc2" event={"ID":"0a39712c-ebc2-4a9a-8205-537d4d00fae7","Type":"ContainerStarted","Data":"3b6d210faf54d5239f3f26fc08014bab3beed9293e9b24197ce626db700c9c5c"} Nov 28 16:14:28 crc kubenswrapper[4954]: I1128 16:14:28.581674 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 16:14:28 crc kubenswrapper[4954]: I1128 16:14:28.612472 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xrnc2" podStartSLOduration=7.93634257 podStartE2EDuration="1m47.612445008s" podCreationTimestamp="2025-11-28 16:12:41 +0000 UTC" firstStartedPulling="2025-11-28 16:12:43.871024531 +0000 UTC m=+117.262693072" lastFinishedPulling="2025-11-28 16:14:23.547126959 +0000 UTC m=+216.938795510" observedRunningTime="2025-11-28 16:14:24.676697438 +0000 UTC m=+218.068365989" watchObservedRunningTime="2025-11-28 16:14:28.612445008 +0000 UTC m=+222.004113589" Nov 28 16:14:29 crc kubenswrapper[4954]: I1128 16:14:29.128421 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tw7lr" Nov 28 16:14:29 crc kubenswrapper[4954]: I1128 16:14:29.128498 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tw7lr" Nov 28 16:14:29 crc kubenswrapper[4954]: I1128 16:14:29.187355 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tw7lr" Nov 28 16:14:29 crc kubenswrapper[4954]: I1128 16:14:29.358895 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-f5sl2" Nov 28 16:14:29 crc kubenswrapper[4954]: I1128 16:14:29.358969 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-f5sl2" Nov 28 16:14:29 crc kubenswrapper[4954]: I1128 16:14:29.419408 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-f5sl2" Nov 28 16:14:29 crc kubenswrapper[4954]: I1128 16:14:29.746359 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-f5sl2" Nov 28 16:14:29 crc kubenswrapper[4954]: I1128 16:14:29.748951 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tw7lr" Nov 28 16:14:29 crc kubenswrapper[4954]: I1128 16:14:29.824868 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:14:29 crc kubenswrapper[4954]: I1128 16:14:29.826467 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:14:29 crc kubenswrapper[4954]: I1128 16:14:29.906866 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:14:30 crc kubenswrapper[4954]: I1128 16:14:30.759954 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:14:31 crc kubenswrapper[4954]: I1128 16:14:31.346923 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:14:31 crc kubenswrapper[4954]: I1128 16:14:31.346966 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:14:31 crc kubenswrapper[4954]: I1128 16:14:31.406982 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:14:31 crc kubenswrapper[4954]: I1128 16:14:31.646554 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" podUID="77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" containerName="oauth-openshift" containerID="cri-o://c1e4eed5b3ad0f22b06ecb6b9513d188a9898eb56273d8ce7a6c68584655990a" gracePeriod=15 Nov 28 16:14:31 crc kubenswrapper[4954]: I1128 16:14:31.710167 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:14:31 crc kubenswrapper[4954]: I1128 16:14:31.710216 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:14:31 crc kubenswrapper[4954]: I1128 16:14:31.773519 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:14:31 crc kubenswrapper[4954]: I1128 16:14:31.774756 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.348829 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.348892 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.427200 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.481665 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.481779 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.481900 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.485184 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.485293 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046" gracePeriod=600 Nov 28 16:14:32 crc 
kubenswrapper[4954]: I1128 16:14:32.630604 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.678647 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-55c5844bb-z65ps"] Nov 28 16:14:32 crc kubenswrapper[4954]: E1128 16:14:32.678948 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" containerName="registry-server" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.678975 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" containerName="registry-server" Nov 28 16:14:32 crc kubenswrapper[4954]: E1128 16:14:32.678996 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bab66e76-c25e-4abb-8513-df4f18c99e90" containerName="pruner" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.679010 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="bab66e76-c25e-4abb-8513-df4f18c99e90" containerName="pruner" Nov 28 16:14:32 crc kubenswrapper[4954]: E1128 16:14:32.679025 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" containerName="oauth-openshift" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.679038 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" containerName="oauth-openshift" Nov 28 16:14:32 crc kubenswrapper[4954]: E1128 16:14:32.679063 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" containerName="extract-utilities" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.679076 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" containerName="extract-utilities" Nov 28 16:14:32 crc kubenswrapper[4954]: E1128 16:14:32.679104 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" containerName="extract-content" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.679117 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" containerName="extract-content" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.679283 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3e07b91-560c-43c3-ab3f-cfe0e4c58ea6" containerName="registry-server" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.679302 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="bab66e76-c25e-4abb-8513-df4f18c99e90" containerName="pruner" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.679318 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" containerName="oauth-openshift" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.680084 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.709162 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-55c5844bb-z65ps"] Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721034 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-service-ca\") pod \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721081 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-cliconfig\") pod \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721141 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-ocp-branding-template\") pod \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721164 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-login\") pod \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721210 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-error\") pod \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721234 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-router-certs\") pod \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721272 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-session\") pod \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721303 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9c9vr\" (UniqueName: \"kubernetes.io/projected/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-kube-api-access-9c9vr\") pod \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721326 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-trusted-ca-bundle\") pod \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721365 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-audit-policies\") pod \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721385 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-audit-dir\") pod \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721406 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-serving-cert\") pod \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721444 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-idp-0-file-data\") pod \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721470 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-provider-selection\") pod \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\" (UID: \"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba\") " Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721587 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-cliconfig\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721632 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721654 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-user-template-error\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721703 4954 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.721730 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.722767 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/041553f2-67ec-493e-84e3-be25a234f5d6-audit-dir\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.722831 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-router-certs\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.722857 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.722889 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-service-ca\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.722940 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-session\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.722968 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-serving-cert\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " 
pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.723019 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/041553f2-67ec-493e-84e3-be25a234f5d6-audit-policies\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.723052 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-user-template-login\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.723428 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8d4fh\" (UniqueName: \"kubernetes.io/projected/041553f2-67ec-493e-84e3-be25a234f5d6-kube-api-access-8d4fh\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.723485 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" (UID: "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.726128 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" (UID: "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.726609 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" (UID: "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.726899 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" (UID: "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.726938 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" (UID: "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.754363 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" (UID: "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.754611 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" (UID: "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.756687 4954 generic.go:334] "Generic (PLEG): container finished" podID="77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" containerID="c1e4eed5b3ad0f22b06ecb6b9513d188a9898eb56273d8ce7a6c68584655990a" exitCode=0 Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.756757 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" event={"ID":"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba","Type":"ContainerDied","Data":"c1e4eed5b3ad0f22b06ecb6b9513d188a9898eb56273d8ce7a6c68584655990a"} Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.756788 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" event={"ID":"77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba","Type":"ContainerDied","Data":"2b7ddbf9d0ce8fbca4012e3b71157172e5dc90bc7ac30d2f2ffd3e945f5b7880"} Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.756809 4954 scope.go:117] "RemoveContainer" containerID="c1e4eed5b3ad0f22b06ecb6b9513d188a9898eb56273d8ce7a6c68584655990a" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.756923 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-d6kn6" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.768017 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-kube-api-access-9c9vr" (OuterVolumeSpecName: "kube-api-access-9c9vr") pod "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" (UID: "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba"). InnerVolumeSpecName "kube-api-access-9c9vr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.783901 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" (UID: "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.787662 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" (UID: "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.790411 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" (UID: "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.793893 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" (UID: "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.794036 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" (UID: "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.794585 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" (UID: "77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.798145 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046" exitCode=0 Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.798181 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046"} Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.811308 4954 scope.go:117] "RemoveContainer" containerID="c1e4eed5b3ad0f22b06ecb6b9513d188a9898eb56273d8ce7a6c68584655990a" Nov 28 16:14:32 crc kubenswrapper[4954]: E1128 16:14:32.812319 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1e4eed5b3ad0f22b06ecb6b9513d188a9898eb56273d8ce7a6c68584655990a\": container with ID starting with c1e4eed5b3ad0f22b06ecb6b9513d188a9898eb56273d8ce7a6c68584655990a not found: ID does not exist" containerID="c1e4eed5b3ad0f22b06ecb6b9513d188a9898eb56273d8ce7a6c68584655990a" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.812357 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1e4eed5b3ad0f22b06ecb6b9513d188a9898eb56273d8ce7a6c68584655990a"} err="failed to get container status \"c1e4eed5b3ad0f22b06ecb6b9513d188a9898eb56273d8ce7a6c68584655990a\": rpc error: code = NotFound desc = could not find container \"c1e4eed5b3ad0f22b06ecb6b9513d188a9898eb56273d8ce7a6c68584655990a\": container with ID starting with c1e4eed5b3ad0f22b06ecb6b9513d188a9898eb56273d8ce7a6c68584655990a not found: ID does not exist" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.818677 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.831352 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.831451 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/041553f2-67ec-493e-84e3-be25a234f5d6-audit-dir\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.831493 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-router-certs\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.831527 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.831578 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-service-ca\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.831612 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-session\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.831660 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-serving-cert\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.831700 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/041553f2-67ec-493e-84e3-be25a234f5d6-audit-policies\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.831746 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-user-template-login\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.831773 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8d4fh\" (UniqueName: \"kubernetes.io/projected/041553f2-67ec-493e-84e3-be25a234f5d6-kube-api-access-8d4fh\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.831806 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-cliconfig\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.831836 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.831889 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-user-template-error\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.831949 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.832039 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.832061 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.832083 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.832098 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9c9vr\" (UniqueName: \"kubernetes.io/projected/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-kube-api-access-9c9vr\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.832112 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.832131 4954 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.832144 4954 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.832157 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.832171 4954 reconciler_common.go:293] "Volume 
detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.832190 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.832204 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.832217 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.832233 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.832251 4954 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.836189 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-cliconfig\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.836279 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/041553f2-67ec-493e-84e3-be25a234f5d6-audit-policies\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.837163 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-service-ca\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.841894 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.842908 4954 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-session\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.843067 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-serving-cert\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.843638 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-router-certs\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.843678 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/041553f2-67ec-493e-84e3-be25a234f5d6-audit-dir\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.844324 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.845435 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-user-template-login\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.845723 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.848697 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.852195 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.855236 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.855450 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/041553f2-67ec-493e-84e3-be25a234f5d6-v4-0-config-user-template-error\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.868227 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8d4fh\" (UniqueName: \"kubernetes.io/projected/041553f2-67ec-493e-84e3-be25a234f5d6-kube-api-access-8d4fh\") pod \"oauth-openshift-55c5844bb-z65ps\" (UID: \"041553f2-67ec-493e-84e3-be25a234f5d6\") " pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:32 crc kubenswrapper[4954]: I1128 16:14:32.873344 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.091794 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l49q6"] Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.098565 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-d6kn6"] Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.106964 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-d6kn6"] Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.121434 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.402584 4954 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.403628 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.404115 4954 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.404709 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c" gracePeriod=15 Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.404753 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532" gracePeriod=15 Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.404806 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515" gracePeriod=15 Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.404856 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36" gracePeriod=15 Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405220 4954 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 16:14:33 crc kubenswrapper[4954]: E1128 16:14:33.405370 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405384 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 16:14:33 crc kubenswrapper[4954]: E1128 16:14:33.405395 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405404 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 16:14:33 crc kubenswrapper[4954]: E1128 16:14:33.405415 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405422 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 16:14:33 crc kubenswrapper[4954]: E1128 16:14:33.405435 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405443 4954 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 16:14:33 crc kubenswrapper[4954]: E1128 16:14:33.405452 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405460 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 16:14:33 crc kubenswrapper[4954]: E1128 16:14:33.405470 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405478 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 16:14:33 crc kubenswrapper[4954]: E1128 16:14:33.405490 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405498 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405654 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405670 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405685 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405696 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405705 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405715 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 16:14:33 crc kubenswrapper[4954]: E1128 16:14:33.405832 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405842 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.405956 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.406587 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523" 
gracePeriod=15 Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.440970 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.441024 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.441049 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.441083 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.441105 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.441181 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.441262 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.441338 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542163 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542267 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542298 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542325 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542357 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542402 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542423 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542448 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542551 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542604 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: 
\"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542634 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542660 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542690 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542718 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542743 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.542767 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:33 crc kubenswrapper[4954]: E1128 16:14:33.764516 4954 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 28 16:14:33 crc kubenswrapper[4954]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-55c5844bb-z65ps_openshift-authentication_041553f2-67ec-493e-84e3-be25a234f5d6_0(121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256): error adding pod openshift-authentication_oauth-openshift-55c5844bb-z65ps to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256" Netns:"/var/run/netns/2f7f8853-375b-472f-a572-acf0ef43e485" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-55c5844bb-z65ps;K8S_POD_INFRA_CONTAINER_ID=121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256;K8S_POD_UID=041553f2-67ec-493e-84e3-be25a234f5d6" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-55c5844bb-z65ps] networking: Multus: 
[openshift-authentication/oauth-openshift-55c5844bb-z65ps/041553f2-67ec-493e-84e3-be25a234f5d6]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-55c5844bb-z65ps?timeout=1m0s": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:14:33 crc kubenswrapper[4954]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 16:14:33 crc kubenswrapper[4954]: > Nov 28 16:14:33 crc kubenswrapper[4954]: E1128 16:14:33.764619 4954 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Nov 28 16:14:33 crc kubenswrapper[4954]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-55c5844bb-z65ps_openshift-authentication_041553f2-67ec-493e-84e3-be25a234f5d6_0(121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256): error adding pod openshift-authentication_oauth-openshift-55c5844bb-z65ps to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256" Netns:"/var/run/netns/2f7f8853-375b-472f-a572-acf0ef43e485" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-55c5844bb-z65ps;K8S_POD_INFRA_CONTAINER_ID=121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256;K8S_POD_UID=041553f2-67ec-493e-84e3-be25a234f5d6" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-55c5844bb-z65ps] networking: Multus: [openshift-authentication/oauth-openshift-55c5844bb-z65ps/041553f2-67ec-493e-84e3-be25a234f5d6]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-55c5844bb-z65ps?timeout=1m0s": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:14:33 crc kubenswrapper[4954]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 16:14:33 crc kubenswrapper[4954]: > pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:33 crc kubenswrapper[4954]: E1128 16:14:33.764644 4954 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=< Nov 28 16:14:33 crc kubenswrapper[4954]: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_oauth-openshift-55c5844bb-z65ps_openshift-authentication_041553f2-67ec-493e-84e3-be25a234f5d6_0(121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256): error adding pod openshift-authentication_oauth-openshift-55c5844bb-z65ps to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256" Netns:"/var/run/netns/2f7f8853-375b-472f-a572-acf0ef43e485" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-55c5844bb-z65ps;K8S_POD_INFRA_CONTAINER_ID=121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256;K8S_POD_UID=041553f2-67ec-493e-84e3-be25a234f5d6" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-55c5844bb-z65ps] networking: Multus: [openshift-authentication/oauth-openshift-55c5844bb-z65ps/041553f2-67ec-493e-84e3-be25a234f5d6]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-55c5844bb-z65ps?timeout=1m0s": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:14:33 crc kubenswrapper[4954]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 16:14:33 crc kubenswrapper[4954]: > pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:33 crc kubenswrapper[4954]: E1128 16:14:33.764751 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"oauth-openshift-55c5844bb-z65ps_openshift-authentication(041553f2-67ec-493e-84e3-be25a234f5d6)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"oauth-openshift-55c5844bb-z65ps_openshift-authentication(041553f2-67ec-493e-84e3-be25a234f5d6)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-55c5844bb-z65ps_openshift-authentication_041553f2-67ec-493e-84e3-be25a234f5d6_0(121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256): error adding pod openshift-authentication_oauth-openshift-55c5844bb-z65ps to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256\\\" Netns:\\\"/var/run/netns/2f7f8853-375b-472f-a572-acf0ef43e485\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-55c5844bb-z65ps;K8S_POD_INFRA_CONTAINER_ID=121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256;K8S_POD_UID=041553f2-67ec-493e-84e3-be25a234f5d6\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-55c5844bb-z65ps] networking: Multus: 
[openshift-authentication/oauth-openshift-55c5844bb-z65ps/041553f2-67ec-493e-84e3-be25a234f5d6]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: status update failed for pod /: Get \\\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-55c5844bb-z65ps?timeout=1m0s\\\": dial tcp 38.102.83.173:6443: connect: connection refused\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" podUID="041553f2-67ec-493e-84e3-be25a234f5d6" Nov 28 16:14:33 crc kubenswrapper[4954]: E1128 16:14:33.765736 4954 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.173:6443: connect: connection refused" event=< Nov 28 16:14:33 crc kubenswrapper[4954]: &Event{ObjectMeta:{oauth-openshift-55c5844bb-z65ps.187c37c51d74a810 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-55c5844bb-z65ps,UID:041553f2-67ec-493e-84e3-be25a234f5d6,APIVersion:v1,ResourceVersion:29351,FieldPath:,},Reason:FailedCreatePodSandBox,Message:Failed to create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-55c5844bb-z65ps_openshift-authentication_041553f2-67ec-493e-84e3-be25a234f5d6_0(121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256): error adding pod openshift-authentication_oauth-openshift-55c5844bb-z65ps to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256" Netns:"/var/run/netns/2f7f8853-375b-472f-a572-acf0ef43e485" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-55c5844bb-z65ps;K8S_POD_INFRA_CONTAINER_ID=121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256;K8S_POD_UID=041553f2-67ec-493e-84e3-be25a234f5d6" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-55c5844bb-z65ps] networking: Multus: [openshift-authentication/oauth-openshift-55c5844bb-z65ps/041553f2-67ec-493e-84e3-be25a234f5d6]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-55c5844bb-z65ps?timeout=1m0s": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:14:33 crc kubenswrapper[4954]: ': StdinData: 
{"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"},Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 16:14:33.764661264 +0000 UTC m=+227.156329805,LastTimestamp:2025-11-28 16:14:33.764661264 +0000 UTC m=+227.156329805,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Nov 28 16:14:33 crc kubenswrapper[4954]: > Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.811718 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"cb24ea3233adacc195fae8733d377a677bc6d6ac461d682e127e50d4c1e27874"} Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.812460 4954 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.813018 4954 status_manager.go:851] "Failed to get status for pod" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-jprxj\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.814616 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.816033 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.816790 4954 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c" exitCode=0 Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.816819 4954 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36" exitCode=0 Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.816830 4954 scope.go:117] "RemoveContainer" containerID="4b4deb6285441d531f0c21c04e9d7e97a1624abe6ae3c6fd4ede7930aca3cfd3" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.816830 4954 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515" exitCode=0 Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.816915 4954 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532" 
exitCode=2 Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.817248 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-l49q6" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" containerName="registry-server" containerID="cri-o://496702a1bf72adfe73e6f6dccd6fa77c5583a0c6bc8f56e8d8614cc75eb69b63" gracePeriod=2 Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.817687 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.817817 4954 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.818324 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.819140 4954 status_manager.go:851] "Failed to get status for pod" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" pod="openshift-marketplace/community-operators-l49q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-l49q6\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.819651 4954 status_manager.go:851] "Failed to get status for pod" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-jprxj\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:33 crc kubenswrapper[4954]: I1128 16:14:33.864487 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba" path="/var/lib/kubelet/pods/77fca8e5-3ff0-4e69-895c-2ddd5f84b3ba/volumes" Nov 28 16:14:34 crc kubenswrapper[4954]: E1128 16:14:34.259666 4954 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 28 16:14:34 crc kubenswrapper[4954]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-55c5844bb-z65ps_openshift-authentication_041553f2-67ec-493e-84e3-be25a234f5d6_0(a272e9fd8f12c5d5f26db24443610341f1fe35bb9011abf7b5df9c4b02dcbca3): error adding pod openshift-authentication_oauth-openshift-55c5844bb-z65ps to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"a272e9fd8f12c5d5f26db24443610341f1fe35bb9011abf7b5df9c4b02dcbca3" Netns:"/var/run/netns/9a61974a-d08a-4faa-a970-97ce1ff8b806" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-55c5844bb-z65ps;K8S_POD_INFRA_CONTAINER_ID=a272e9fd8f12c5d5f26db24443610341f1fe35bb9011abf7b5df9c4b02dcbca3;K8S_POD_UID=041553f2-67ec-493e-84e3-be25a234f5d6" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-55c5844bb-z65ps] networking: Multus: [openshift-authentication/oauth-openshift-55c5844bb-z65ps/041553f2-67ec-493e-84e3-be25a234f5d6]: error setting the networks status: 
SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-55c5844bb-z65ps?timeout=1m0s": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:14:34 crc kubenswrapper[4954]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 16:14:34 crc kubenswrapper[4954]: > Nov 28 16:14:34 crc kubenswrapper[4954]: E1128 16:14:34.259748 4954 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Nov 28 16:14:34 crc kubenswrapper[4954]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-55c5844bb-z65ps_openshift-authentication_041553f2-67ec-493e-84e3-be25a234f5d6_0(a272e9fd8f12c5d5f26db24443610341f1fe35bb9011abf7b5df9c4b02dcbca3): error adding pod openshift-authentication_oauth-openshift-55c5844bb-z65ps to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"a272e9fd8f12c5d5f26db24443610341f1fe35bb9011abf7b5df9c4b02dcbca3" Netns:"/var/run/netns/9a61974a-d08a-4faa-a970-97ce1ff8b806" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-55c5844bb-z65ps;K8S_POD_INFRA_CONTAINER_ID=a272e9fd8f12c5d5f26db24443610341f1fe35bb9011abf7b5df9c4b02dcbca3;K8S_POD_UID=041553f2-67ec-493e-84e3-be25a234f5d6" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-55c5844bb-z65ps] networking: Multus: [openshift-authentication/oauth-openshift-55c5844bb-z65ps/041553f2-67ec-493e-84e3-be25a234f5d6]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-55c5844bb-z65ps?timeout=1m0s": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:14:34 crc kubenswrapper[4954]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 16:14:34 crc kubenswrapper[4954]: > pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:34 crc kubenswrapper[4954]: E1128 16:14:34.259768 4954 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=< Nov 28 16:14:34 crc kubenswrapper[4954]: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_oauth-openshift-55c5844bb-z65ps_openshift-authentication_041553f2-67ec-493e-84e3-be25a234f5d6_0(a272e9fd8f12c5d5f26db24443610341f1fe35bb9011abf7b5df9c4b02dcbca3): error adding pod openshift-authentication_oauth-openshift-55c5844bb-z65ps to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"a272e9fd8f12c5d5f26db24443610341f1fe35bb9011abf7b5df9c4b02dcbca3" Netns:"/var/run/netns/9a61974a-d08a-4faa-a970-97ce1ff8b806" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-55c5844bb-z65ps;K8S_POD_INFRA_CONTAINER_ID=a272e9fd8f12c5d5f26db24443610341f1fe35bb9011abf7b5df9c4b02dcbca3;K8S_POD_UID=041553f2-67ec-493e-84e3-be25a234f5d6" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-55c5844bb-z65ps] networking: Multus: [openshift-authentication/oauth-openshift-55c5844bb-z65ps/041553f2-67ec-493e-84e3-be25a234f5d6]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-55c5844bb-z65ps?timeout=1m0s": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:14:34 crc kubenswrapper[4954]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 16:14:34 crc kubenswrapper[4954]: > pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:14:34 crc kubenswrapper[4954]: E1128 16:14:34.259834 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"oauth-openshift-55c5844bb-z65ps_openshift-authentication(041553f2-67ec-493e-84e3-be25a234f5d6)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"oauth-openshift-55c5844bb-z65ps_openshift-authentication(041553f2-67ec-493e-84e3-be25a234f5d6)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-55c5844bb-z65ps_openshift-authentication_041553f2-67ec-493e-84e3-be25a234f5d6_0(a272e9fd8f12c5d5f26db24443610341f1fe35bb9011abf7b5df9c4b02dcbca3): error adding pod openshift-authentication_oauth-openshift-55c5844bb-z65ps to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"a272e9fd8f12c5d5f26db24443610341f1fe35bb9011abf7b5df9c4b02dcbca3\\\" Netns:\\\"/var/run/netns/9a61974a-d08a-4faa-a970-97ce1ff8b806\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-55c5844bb-z65ps;K8S_POD_INFRA_CONTAINER_ID=a272e9fd8f12c5d5f26db24443610341f1fe35bb9011abf7b5df9c4b02dcbca3;K8S_POD_UID=041553f2-67ec-493e-84e3-be25a234f5d6\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-55c5844bb-z65ps] networking: Multus: 
[openshift-authentication/oauth-openshift-55c5844bb-z65ps/041553f2-67ec-493e-84e3-be25a234f5d6]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: status update failed for pod /: Get \\\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-55c5844bb-z65ps?timeout=1m0s\\\": dial tcp 38.102.83.173:6443: connect: connection refused\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" podUID="041553f2-67ec-493e-84e3-be25a234f5d6" Nov 28 16:14:34 crc kubenswrapper[4954]: E1128 16:14:34.656738 4954 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.173:6443: connect: connection refused" event=< Nov 28 16:14:34 crc kubenswrapper[4954]: &Event{ObjectMeta:{oauth-openshift-55c5844bb-z65ps.187c37c51d74a810 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-55c5844bb-z65ps,UID:041553f2-67ec-493e-84e3-be25a234f5d6,APIVersion:v1,ResourceVersion:29351,FieldPath:,},Reason:FailedCreatePodSandBox,Message:Failed to create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-55c5844bb-z65ps_openshift-authentication_041553f2-67ec-493e-84e3-be25a234f5d6_0(121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256): error adding pod openshift-authentication_oauth-openshift-55c5844bb-z65ps to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256" Netns:"/var/run/netns/2f7f8853-375b-472f-a572-acf0ef43e485" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-55c5844bb-z65ps;K8S_POD_INFRA_CONTAINER_ID=121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256;K8S_POD_UID=041553f2-67ec-493e-84e3-be25a234f5d6" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-55c5844bb-z65ps] networking: Multus: [openshift-authentication/oauth-openshift-55c5844bb-z65ps/041553f2-67ec-493e-84e3-be25a234f5d6]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-55c5844bb-z65ps?timeout=1m0s": dial tcp 38.102.83.173:6443: connect: connection refused Nov 28 16:14:34 crc kubenswrapper[4954]: ': StdinData: 
{"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"},Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 16:14:33.764661264 +0000 UTC m=+227.156329805,LastTimestamp:2025-11-28 16:14:33.764661264 +0000 UTC m=+227.156329805,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Nov 28 16:14:34 crc kubenswrapper[4954]: > Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.697827 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.698471 4954 status_manager.go:851] "Failed to get status for pod" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" pod="openshift-marketplace/community-operators-l49q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-l49q6\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.698881 4954 status_manager.go:851] "Failed to get status for pod" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-jprxj\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.756278 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgsls\" (UniqueName: \"kubernetes.io/projected/a6c3823f-faef-4429-b6a0-a9c8678b7358-kube-api-access-xgsls\") pod \"a6c3823f-faef-4429-b6a0-a9c8678b7358\" (UID: \"a6c3823f-faef-4429-b6a0-a9c8678b7358\") " Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.756338 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6c3823f-faef-4429-b6a0-a9c8678b7358-utilities\") pod \"a6c3823f-faef-4429-b6a0-a9c8678b7358\" (UID: \"a6c3823f-faef-4429-b6a0-a9c8678b7358\") " Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.756390 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6c3823f-faef-4429-b6a0-a9c8678b7358-catalog-content\") pod \"a6c3823f-faef-4429-b6a0-a9c8678b7358\" (UID: \"a6c3823f-faef-4429-b6a0-a9c8678b7358\") " Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.758207 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6c3823f-faef-4429-b6a0-a9c8678b7358-utilities" (OuterVolumeSpecName: "utilities") pod "a6c3823f-faef-4429-b6a0-a9c8678b7358" (UID: "a6c3823f-faef-4429-b6a0-a9c8678b7358"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.766609 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6c3823f-faef-4429-b6a0-a9c8678b7358-kube-api-access-xgsls" (OuterVolumeSpecName: "kube-api-access-xgsls") pod "a6c3823f-faef-4429-b6a0-a9c8678b7358" (UID: "a6c3823f-faef-4429-b6a0-a9c8678b7358"). InnerVolumeSpecName "kube-api-access-xgsls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.821032 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6c3823f-faef-4429-b6a0-a9c8678b7358-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a6c3823f-faef-4429-b6a0-a9c8678b7358" (UID: "a6c3823f-faef-4429-b6a0-a9c8678b7358"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.827940 4954 generic.go:334] "Generic (PLEG): container finished" podID="a6c3823f-faef-4429-b6a0-a9c8678b7358" containerID="496702a1bf72adfe73e6f6dccd6fa77c5583a0c6bc8f56e8d8614cc75eb69b63" exitCode=0 Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.828088 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l49q6" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.828079 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l49q6" event={"ID":"a6c3823f-faef-4429-b6a0-a9c8678b7358","Type":"ContainerDied","Data":"496702a1bf72adfe73e6f6dccd6fa77c5583a0c6bc8f56e8d8614cc75eb69b63"} Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.828323 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l49q6" event={"ID":"a6c3823f-faef-4429-b6a0-a9c8678b7358","Type":"ContainerDied","Data":"88392424aa746a96b2becac10029bd59780d741b637058f493b020bf3ea77eb1"} Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.828392 4954 scope.go:117] "RemoveContainer" containerID="496702a1bf72adfe73e6f6dccd6fa77c5583a0c6bc8f56e8d8614cc75eb69b63" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.829015 4954 status_manager.go:851] "Failed to get status for pod" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" pod="openshift-marketplace/community-operators-l49q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-l49q6\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.830186 4954 status_manager.go:851] "Failed to get status for pod" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-jprxj\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.832144 4954 generic.go:334] "Generic (PLEG): container finished" podID="988f7288-0247-4444-8d30-71cfa932aa33" containerID="ec9d12dc409602aff05608586149050a9515912fc013d34c126ef39734fe09a1" exitCode=0 Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.832343 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" 
event={"ID":"988f7288-0247-4444-8d30-71cfa932aa33","Type":"ContainerDied","Data":"ec9d12dc409602aff05608586149050a9515912fc013d34c126ef39734fe09a1"} Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.834339 4954 status_manager.go:851] "Failed to get status for pod" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-jprxj\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.834924 4954 status_manager.go:851] "Failed to get status for pod" podUID="988f7288-0247-4444-8d30-71cfa932aa33" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.835256 4954 status_manager.go:851] "Failed to get status for pod" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" pod="openshift-marketplace/community-operators-l49q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-l49q6\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.838067 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.853211 4954 scope.go:117] "RemoveContainer" containerID="c8ae4445850075c5a85a8157da7ff67c5a570ee911aa5ccc2bd181985839cfc5" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.853427 4954 status_manager.go:851] "Failed to get status for pod" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" pod="openshift-marketplace/community-operators-l49q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-l49q6\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.854282 4954 status_manager.go:851] "Failed to get status for pod" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-jprxj\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.854825 4954 status_manager.go:851] "Failed to get status for pod" podUID="988f7288-0247-4444-8d30-71cfa932aa33" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.857978 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6c3823f-faef-4429-b6a0-a9c8678b7358-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.858006 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6c3823f-faef-4429-b6a0-a9c8678b7358-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:34 crc kubenswrapper[4954]: 
I1128 16:14:34.858021 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgsls\" (UniqueName: \"kubernetes.io/projected/a6c3823f-faef-4429-b6a0-a9c8678b7358-kube-api-access-xgsls\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.891662 4954 scope.go:117] "RemoveContainer" containerID="9d7883087c30a27ba08a46e3b5c76754ee186b4f3469be87d846cf88673fc94c" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.908464 4954 scope.go:117] "RemoveContainer" containerID="496702a1bf72adfe73e6f6dccd6fa77c5583a0c6bc8f56e8d8614cc75eb69b63" Nov 28 16:14:34 crc kubenswrapper[4954]: E1128 16:14:34.908833 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"496702a1bf72adfe73e6f6dccd6fa77c5583a0c6bc8f56e8d8614cc75eb69b63\": container with ID starting with 496702a1bf72adfe73e6f6dccd6fa77c5583a0c6bc8f56e8d8614cc75eb69b63 not found: ID does not exist" containerID="496702a1bf72adfe73e6f6dccd6fa77c5583a0c6bc8f56e8d8614cc75eb69b63" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.908879 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"496702a1bf72adfe73e6f6dccd6fa77c5583a0c6bc8f56e8d8614cc75eb69b63"} err="failed to get container status \"496702a1bf72adfe73e6f6dccd6fa77c5583a0c6bc8f56e8d8614cc75eb69b63\": rpc error: code = NotFound desc = could not find container \"496702a1bf72adfe73e6f6dccd6fa77c5583a0c6bc8f56e8d8614cc75eb69b63\": container with ID starting with 496702a1bf72adfe73e6f6dccd6fa77c5583a0c6bc8f56e8d8614cc75eb69b63 not found: ID does not exist" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.908915 4954 scope.go:117] "RemoveContainer" containerID="c8ae4445850075c5a85a8157da7ff67c5a570ee911aa5ccc2bd181985839cfc5" Nov 28 16:14:34 crc kubenswrapper[4954]: E1128 16:14:34.909301 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8ae4445850075c5a85a8157da7ff67c5a570ee911aa5ccc2bd181985839cfc5\": container with ID starting with c8ae4445850075c5a85a8157da7ff67c5a570ee911aa5ccc2bd181985839cfc5 not found: ID does not exist" containerID="c8ae4445850075c5a85a8157da7ff67c5a570ee911aa5ccc2bd181985839cfc5" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.909363 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8ae4445850075c5a85a8157da7ff67c5a570ee911aa5ccc2bd181985839cfc5"} err="failed to get container status \"c8ae4445850075c5a85a8157da7ff67c5a570ee911aa5ccc2bd181985839cfc5\": rpc error: code = NotFound desc = could not find container \"c8ae4445850075c5a85a8157da7ff67c5a570ee911aa5ccc2bd181985839cfc5\": container with ID starting with c8ae4445850075c5a85a8157da7ff67c5a570ee911aa5ccc2bd181985839cfc5 not found: ID does not exist" Nov 28 16:14:34 crc kubenswrapper[4954]: I1128 16:14:34.909398 4954 scope.go:117] "RemoveContainer" containerID="9d7883087c30a27ba08a46e3b5c76754ee186b4f3469be87d846cf88673fc94c" Nov 28 16:14:34 crc kubenswrapper[4954]: E1128 16:14:34.909933 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d7883087c30a27ba08a46e3b5c76754ee186b4f3469be87d846cf88673fc94c\": container with ID starting with 9d7883087c30a27ba08a46e3b5c76754ee186b4f3469be87d846cf88673fc94c not found: ID does not exist" containerID="9d7883087c30a27ba08a46e3b5c76754ee186b4f3469be87d846cf88673fc94c" Nov 28 16:14:34 
crc kubenswrapper[4954]: I1128 16:14:34.909972 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d7883087c30a27ba08a46e3b5c76754ee186b4f3469be87d846cf88673fc94c"} err="failed to get container status \"9d7883087c30a27ba08a46e3b5c76754ee186b4f3469be87d846cf88673fc94c\": rpc error: code = NotFound desc = could not find container \"9d7883087c30a27ba08a46e3b5c76754ee186b4f3469be87d846cf88673fc94c\": container with ID starting with 9d7883087c30a27ba08a46e3b5c76754ee186b4f3469be87d846cf88673fc94c not found: ID does not exist" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.774678 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.776398 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.777076 4954 status_manager.go:851] "Failed to get status for pod" podUID="988f7288-0247-4444-8d30-71cfa932aa33" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.777602 4954 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.778056 4954 status_manager.go:851] "Failed to get status for pod" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" pod="openshift-marketplace/community-operators-l49q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-l49q6\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.778401 4954 status_manager.go:851] "Failed to get status for pod" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-jprxj\": dial tcp 38.102.83.173:6443: connect: connection refused" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.857705 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.859383 4954 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523" exitCode=0 Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.859756 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.872068 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.872222 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.872246 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.872331 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.872339 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.872481 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.872845 4954 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.872884 4954 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.872911 4954 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.873578 4954 scope.go:117] "RemoveContainer" containerID="799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.897729 4954 scope.go:117] "RemoveContainer" containerID="12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.928367 4954 scope.go:117] "RemoveContainer" containerID="f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.949631 4954 scope.go:117] "RemoveContainer" containerID="7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.968553 4954 scope.go:117] "RemoveContainer" containerID="e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523" Nov 28 16:14:35 crc kubenswrapper[4954]: I1128 16:14:35.983832 4954 scope.go:117] "RemoveContainer" containerID="3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107" Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.019915 4954 scope.go:117] "RemoveContainer" containerID="799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c" Nov 28 16:14:36 crc kubenswrapper[4954]: E1128 16:14:36.020718 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\": container with ID starting with 799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c not found: ID does not exist" containerID="799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c" Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.020753 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c"} err="failed to get container status \"799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\": rpc error: code = NotFound desc = could not find container \"799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c\": container with ID starting with 799a5d56a60567fe44af38d862bbf721de7c0df27f403450ed983c682f96315c not found: ID does not exist" Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.020780 4954 scope.go:117] "RemoveContainer" containerID="12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36" Nov 28 16:14:36 crc kubenswrapper[4954]: E1128 16:14:36.021796 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\": container with ID starting with 12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36 not found: ID does not exist" containerID="12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36" Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.021819 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36"} err="failed to get container status \"12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\": rpc error: code = NotFound desc = could not find container \"12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36\": container with ID starting with 12218c2568d220b5ffd560ea260f4f357578d6ce1ea35fae57b02f0b51c1ac36 not found: ID does not exist" Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.021837 4954 scope.go:117] "RemoveContainer" containerID="f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515" Nov 28 16:14:36 crc kubenswrapper[4954]: E1128 16:14:36.023042 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\": container with ID starting with f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515 not found: ID does not exist" containerID="f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515" Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.023065 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515"} err="failed to get container status \"f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\": rpc error: code = NotFound desc = could not find container \"f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515\": container with ID starting with f4664505c90f9e54ef09d5db59a7f902348ba8f11ac058efd9e1caecc6135515 not found: ID does not exist" Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.023083 4954 scope.go:117] "RemoveContainer" containerID="7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532" Nov 28 16:14:36 crc kubenswrapper[4954]: E1128 16:14:36.023623 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\": container with ID starting with 7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532 not found: ID does not exist" containerID="7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532" Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.023645 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532"} err="failed to get container status \"7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\": rpc error: code = NotFound desc = could not find container \"7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532\": container with ID starting with 7bfb707aaf32a3dcfa7a8041a1b2ecffe8001e1706829914ed74195d2a3c5532 not found: ID does not exist" Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.023662 4954 scope.go:117] "RemoveContainer" containerID="e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523" Nov 28 16:14:36 crc 
kubenswrapper[4954]: E1128 16:14:36.023981 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\": container with ID starting with e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523 not found: ID does not exist" containerID="e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.024002 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523"} err="failed to get container status \"e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\": rpc error: code = NotFound desc = could not find container \"e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523\": container with ID starting with e5972d0d73e5fc4f05e6b59c2d0abbfe118b34c716611a318887842a6b25b523 not found: ID does not exist"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.024019 4954 scope.go:117] "RemoveContainer" containerID="3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107"
Nov 28 16:14:36 crc kubenswrapper[4954]: E1128 16:14:36.024403 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\": container with ID starting with 3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107 not found: ID does not exist" containerID="3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.024429 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107"} err="failed to get container status \"3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\": rpc error: code = NotFound desc = could not find container \"3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107\": container with ID starting with 3b6993dbcb88a7ab4c5bf2fe2b5f05f2c31b6931b1bc37c80268ee3a1e2a1107 not found: ID does not exist"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.143211 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.143910 4954 status_manager.go:851] "Failed to get status for pod" podUID="988f7288-0247-4444-8d30-71cfa932aa33" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.144601 4954 status_manager.go:851] "Failed to get status for pod" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" pod="openshift-marketplace/community-operators-l49q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-l49q6\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.144932 4954 status_manager.go:851] "Failed to get status for pod" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-jprxj\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.176925 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/988f7288-0247-4444-8d30-71cfa932aa33-kubelet-dir\") pod \"988f7288-0247-4444-8d30-71cfa932aa33\" (UID: \"988f7288-0247-4444-8d30-71cfa932aa33\") "
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.176987 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/988f7288-0247-4444-8d30-71cfa932aa33-var-lock\") pod \"988f7288-0247-4444-8d30-71cfa932aa33\" (UID: \"988f7288-0247-4444-8d30-71cfa932aa33\") "
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.177046 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/988f7288-0247-4444-8d30-71cfa932aa33-kube-api-access\") pod \"988f7288-0247-4444-8d30-71cfa932aa33\" (UID: \"988f7288-0247-4444-8d30-71cfa932aa33\") "
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.177115 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/988f7288-0247-4444-8d30-71cfa932aa33-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "988f7288-0247-4444-8d30-71cfa932aa33" (UID: "988f7288-0247-4444-8d30-71cfa932aa33"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.177157 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/988f7288-0247-4444-8d30-71cfa932aa33-var-lock" (OuterVolumeSpecName: "var-lock") pod "988f7288-0247-4444-8d30-71cfa932aa33" (UID: "988f7288-0247-4444-8d30-71cfa932aa33"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.177404 4954 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/988f7288-0247-4444-8d30-71cfa932aa33-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.177431 4954 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/988f7288-0247-4444-8d30-71cfa932aa33-var-lock\") on node \"crc\" DevicePath \"\""
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.177926 4954 status_manager.go:851] "Failed to get status for pod" podUID="988f7288-0247-4444-8d30-71cfa932aa33" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.178437 4954 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.179020 4954 status_manager.go:851] "Failed to get status for pod" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" pod="openshift-marketplace/community-operators-l49q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-l49q6\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.179385 4954 status_manager.go:851] "Failed to get status for pod" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-jprxj\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.183438 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/988f7288-0247-4444-8d30-71cfa932aa33-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "988f7288-0247-4444-8d30-71cfa932aa33" (UID: "988f7288-0247-4444-8d30-71cfa932aa33"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.278897 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/988f7288-0247-4444-8d30-71cfa932aa33-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.869383 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"988f7288-0247-4444-8d30-71cfa932aa33","Type":"ContainerDied","Data":"d05c4f42898183cb98e7782154ded145ba74748480f7257df54973345c7826b3"}
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.869428 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d05c4f42898183cb98e7782154ded145ba74748480f7257df54973345c7826b3"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.869467 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.887429 4954 status_manager.go:851] "Failed to get status for pod" podUID="988f7288-0247-4444-8d30-71cfa932aa33" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.887935 4954 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.888260 4954 status_manager.go:851] "Failed to get status for pod" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" pod="openshift-marketplace/community-operators-l49q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-l49q6\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:36 crc kubenswrapper[4954]: I1128 16:14:36.888586 4954 status_manager.go:851] "Failed to get status for pod" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-jprxj\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:37 crc kubenswrapper[4954]: I1128 16:14:37.865494 4954 status_manager.go:851] "Failed to get status for pod" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" pod="openshift-marketplace/community-operators-l49q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-l49q6\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:37 crc kubenswrapper[4954]: I1128 16:14:37.866647 4954 status_manager.go:851] "Failed to get status for pod" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-jprxj\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:37 crc kubenswrapper[4954]: I1128 16:14:37.867409 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes"
Nov 28 16:14:37 crc kubenswrapper[4954]: I1128 16:14:37.868558 4954 status_manager.go:851] "Failed to get status for pod" podUID="988f7288-0247-4444-8d30-71cfa932aa33" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:37 crc kubenswrapper[4954]: I1128 16:14:37.869055 4954 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:38 crc kubenswrapper[4954]: E1128 16:14:38.445959 4954 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.173:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 16:14:38 crc kubenswrapper[4954]: I1128 16:14:38.446760 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 16:14:38 crc kubenswrapper[4954]: I1128 16:14:38.887460 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"b0381e54669faa5689a088c204eb7a25d1ade159d147fa94368ade9bc9a651b3"}
Nov 28 16:14:38 crc kubenswrapper[4954]: I1128 16:14:38.887996 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"cad0ed844d6d46287af4d7c4be089b62f958e57fbe611d38316900df9a23cb6f"}
Nov 28 16:14:38 crc kubenswrapper[4954]: E1128 16:14:38.888739 4954 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.173:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 16:14:38 crc kubenswrapper[4954]: I1128 16:14:38.888808 4954 status_manager.go:851] "Failed to get status for pod" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" pod="openshift-marketplace/community-operators-l49q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-l49q6\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:38 crc kubenswrapper[4954]: I1128 16:14:38.889854 4954 status_manager.go:851] "Failed to get status for pod" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-jprxj\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:38 crc kubenswrapper[4954]: I1128 16:14:38.890251 4954 status_manager.go:851] "Failed to get status for pod" podUID="988f7288-0247-4444-8d30-71cfa932aa33" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:41 crc kubenswrapper[4954]: E1128 16:14:41.757836 4954 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:41 crc kubenswrapper[4954]: E1128 16:14:41.760686 4954 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:41 crc kubenswrapper[4954]: E1128 16:14:41.763335 4954 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:41 crc kubenswrapper[4954]: E1128 16:14:41.764120 4954 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:41 crc kubenswrapper[4954]: E1128 16:14:41.764649 4954 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:41 crc kubenswrapper[4954]: I1128 16:14:41.764708 4954 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease"
Nov 28 16:14:41 crc kubenswrapper[4954]: E1128 16:14:41.765121 4954 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused" interval="200ms"
Nov 28 16:14:41 crc kubenswrapper[4954]: E1128 16:14:41.965958 4954 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused" interval="400ms"
Nov 28 16:14:42 crc kubenswrapper[4954]: E1128 16:14:42.367039 4954 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused" interval="800ms"
Nov 28 16:14:43 crc kubenswrapper[4954]: E1128 16:14:43.168688 4954 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused" interval="1.6s"
Nov 28 16:14:44 crc kubenswrapper[4954]: E1128 16:14:44.658035 4954 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/events\": dial tcp 38.102.83.173:6443: connect: connection refused" event=<
Nov 28 16:14:44 crc kubenswrapper[4954]: &Event{ObjectMeta:{oauth-openshift-55c5844bb-z65ps.187c37c51d74a810 openshift-authentication 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-authentication,Name:oauth-openshift-55c5844bb-z65ps,UID:041553f2-67ec-493e-84e3-be25a234f5d6,APIVersion:v1,ResourceVersion:29351,FieldPath:,},Reason:FailedCreatePodSandBox,Message:Failed to create pod sandbox: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-55c5844bb-z65ps_openshift-authentication_041553f2-67ec-493e-84e3-be25a234f5d6_0(121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256): error adding pod openshift-authentication_oauth-openshift-55c5844bb-z65ps to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256" Netns:"/var/run/netns/2f7f8853-375b-472f-a572-acf0ef43e485" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-55c5844bb-z65ps;K8S_POD_INFRA_CONTAINER_ID=121194fa5317422490d9b59f67e584245259372d0b3ff4c67a44323a2d1cb256;K8S_POD_UID=041553f2-67ec-493e-84e3-be25a234f5d6" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-55c5844bb-z65ps] networking: Multus: [openshift-authentication/oauth-openshift-55c5844bb-z65ps/041553f2-67ec-493e-84e3-be25a234f5d6]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-55c5844bb-z65ps?timeout=1m0s": dial tcp 38.102.83.173:6443: connect: connection refused
Nov 28 16:14:44 crc kubenswrapper[4954]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"},Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 16:14:33.764661264 +0000 UTC m=+227.156329805,LastTimestamp:2025-11-28 16:14:33.764661264 +0000 UTC m=+227.156329805,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Nov 28 16:14:44 crc kubenswrapper[4954]: >
Nov 28 16:14:44 crc kubenswrapper[4954]: E1128 16:14:44.770544 4954 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.173:6443: connect: connection refused" interval="3.2s"
Nov 28 16:14:44 crc kubenswrapper[4954]: I1128 16:14:44.855595 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 16:14:44 crc kubenswrapper[4954]: I1128 16:14:44.856758 4954 status_manager.go:851] "Failed to get status for pod" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" pod="openshift-marketplace/community-operators-l49q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-l49q6\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:44 crc kubenswrapper[4954]: I1128 16:14:44.857194 4954 status_manager.go:851] "Failed to get status for pod" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-jprxj\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:44 crc kubenswrapper[4954]: I1128 16:14:44.857623 4954 status_manager.go:851] "Failed to get status for pod" podUID="988f7288-0247-4444-8d30-71cfa932aa33" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:44 crc kubenswrapper[4954]: I1128 16:14:44.871571 4954 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0cfded82-34af-4634-9e29-40a7509efd7e"
Nov 28 16:14:44 crc kubenswrapper[4954]: I1128 16:14:44.871617 4954 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0cfded82-34af-4634-9e29-40a7509efd7e"
Nov 28 16:14:44 crc kubenswrapper[4954]: E1128 16:14:44.872126 4954 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.173:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 16:14:44 crc kubenswrapper[4954]: I1128 16:14:44.872823 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 16:14:44 crc kubenswrapper[4954]: W1128 16:14:44.894910 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-00a9ec9356d420e203443e8fefa0accf62276624ce5cbd0015d0959aca9fdd90 WatchSource:0}: Error finding container 00a9ec9356d420e203443e8fefa0accf62276624ce5cbd0015d0959aca9fdd90: Status 404 returned error can't find the container with id 00a9ec9356d420e203443e8fefa0accf62276624ce5cbd0015d0959aca9fdd90
Nov 28 16:14:44 crc kubenswrapper[4954]: I1128 16:14:44.919778 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"00a9ec9356d420e203443e8fefa0accf62276624ce5cbd0015d0959aca9fdd90"}
Nov 28 16:14:45 crc kubenswrapper[4954]: I1128 16:14:45.855351 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps"
Nov 28 16:14:45 crc kubenswrapper[4954]: I1128 16:14:45.856494 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps"
Nov 28 16:14:45 crc kubenswrapper[4954]: I1128 16:14:45.928149 4954 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="d489e83a12c50a18620d30c73085366a4894733baece5c3375b26d030de850ba" exitCode=0
Nov 28 16:14:45 crc kubenswrapper[4954]: I1128 16:14:45.928239 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"d489e83a12c50a18620d30c73085366a4894733baece5c3375b26d030de850ba"}
Nov 28 16:14:45 crc kubenswrapper[4954]: I1128 16:14:45.928667 4954 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0cfded82-34af-4634-9e29-40a7509efd7e"
Nov 28 16:14:45 crc kubenswrapper[4954]: I1128 16:14:45.928706 4954 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0cfded82-34af-4634-9e29-40a7509efd7e"
Nov 28 16:14:45 crc kubenswrapper[4954]: E1128 16:14:45.929072 4954 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.173:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 16:14:45 crc kubenswrapper[4954]: I1128 16:14:45.929098 4954 status_manager.go:851] "Failed to get status for pod" podUID="988f7288-0247-4444-8d30-71cfa932aa33" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:45 crc kubenswrapper[4954]: I1128 16:14:45.929674 4954 status_manager.go:851] "Failed to get status for pod" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" pod="openshift-marketplace/community-operators-l49q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-l49q6\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:45 crc kubenswrapper[4954]: I1128 16:14:45.930292 4954 status_manager.go:851] "Failed to get status for pod" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-jprxj\": dial tcp 38.102.83.173:6443: connect: connection refused"
Nov 28 16:14:46 crc kubenswrapper[4954]: E1128 16:14:46.502836 4954 log.go:32] "RunPodSandbox from runtime service failed" err=<
Nov 28 16:14:46 crc kubenswrapper[4954]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-55c5844bb-z65ps_openshift-authentication_041553f2-67ec-493e-84e3-be25a234f5d6_0(ab73625475eca120464d0809894244ed7690dcbbfcd551305a30c25d7bf2a6f0): error adding pod openshift-authentication_oauth-openshift-55c5844bb-z65ps to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"ab73625475eca120464d0809894244ed7690dcbbfcd551305a30c25d7bf2a6f0" Netns:"/var/run/netns/9407986b-4a31-4b26-b9ae-b8db4869933a" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-55c5844bb-z65ps;K8S_POD_INFRA_CONTAINER_ID=ab73625475eca120464d0809894244ed7690dcbbfcd551305a30c25d7bf2a6f0;K8S_POD_UID=041553f2-67ec-493e-84e3-be25a234f5d6" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-55c5844bb-z65ps] networking: Multus: [openshift-authentication/oauth-openshift-55c5844bb-z65ps/041553f2-67ec-493e-84e3-be25a234f5d6]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-55c5844bb-z65ps?timeout=1m0s": dial tcp 38.102.83.173:6443: connect: connection refused
Nov 28 16:14:46 crc kubenswrapper[4954]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Nov 28 16:14:46 crc kubenswrapper[4954]: >
Nov 28 16:14:46 crc kubenswrapper[4954]: E1128 16:14:46.503782 4954 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=<
Nov 28 16:14:46 crc kubenswrapper[4954]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-55c5844bb-z65ps_openshift-authentication_041553f2-67ec-493e-84e3-be25a234f5d6_0(ab73625475eca120464d0809894244ed7690dcbbfcd551305a30c25d7bf2a6f0): error adding pod openshift-authentication_oauth-openshift-55c5844bb-z65ps to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"ab73625475eca120464d0809894244ed7690dcbbfcd551305a30c25d7bf2a6f0" Netns:"/var/run/netns/9407986b-4a31-4b26-b9ae-b8db4869933a" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-55c5844bb-z65ps;K8S_POD_INFRA_CONTAINER_ID=ab73625475eca120464d0809894244ed7690dcbbfcd551305a30c25d7bf2a6f0;K8S_POD_UID=041553f2-67ec-493e-84e3-be25a234f5d6" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-55c5844bb-z65ps] networking: Multus: [openshift-authentication/oauth-openshift-55c5844bb-z65ps/041553f2-67ec-493e-84e3-be25a234f5d6]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-55c5844bb-z65ps?timeout=1m0s": dial tcp 38.102.83.173:6443: connect: connection refused
Nov 28 16:14:46 crc kubenswrapper[4954]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Nov 28 16:14:46 crc kubenswrapper[4954]: > pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps"
Nov 28 16:14:46 crc kubenswrapper[4954]: E1128 16:14:46.503820 4954 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=<
Nov 28 16:14:46 crc kubenswrapper[4954]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-55c5844bb-z65ps_openshift-authentication_041553f2-67ec-493e-84e3-be25a234f5d6_0(ab73625475eca120464d0809894244ed7690dcbbfcd551305a30c25d7bf2a6f0): error adding pod openshift-authentication_oauth-openshift-55c5844bb-z65ps to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"ab73625475eca120464d0809894244ed7690dcbbfcd551305a30c25d7bf2a6f0" Netns:"/var/run/netns/9407986b-4a31-4b26-b9ae-b8db4869933a" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-55c5844bb-z65ps;K8S_POD_INFRA_CONTAINER_ID=ab73625475eca120464d0809894244ed7690dcbbfcd551305a30c25d7bf2a6f0;K8S_POD_UID=041553f2-67ec-493e-84e3-be25a234f5d6" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-55c5844bb-z65ps] networking: Multus: [openshift-authentication/oauth-openshift-55c5844bb-z65ps/041553f2-67ec-493e-84e3-be25a234f5d6]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-55c5844bb-z65ps?timeout=1m0s": dial tcp 38.102.83.173:6443: connect: connection refused
Nov 28 16:14:46 crc kubenswrapper[4954]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Nov 28 16:14:46 crc kubenswrapper[4954]: > pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps"
Nov 28 16:14:46 crc kubenswrapper[4954]: E1128 16:14:46.503906 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"oauth-openshift-55c5844bb-z65ps_openshift-authentication(041553f2-67ec-493e-84e3-be25a234f5d6)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"oauth-openshift-55c5844bb-z65ps_openshift-authentication(041553f2-67ec-493e-84e3-be25a234f5d6)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-55c5844bb-z65ps_openshift-authentication_041553f2-67ec-493e-84e3-be25a234f5d6_0(ab73625475eca120464d0809894244ed7690dcbbfcd551305a30c25d7bf2a6f0): error adding pod openshift-authentication_oauth-openshift-55c5844bb-z65ps to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"ab73625475eca120464d0809894244ed7690dcbbfcd551305a30c25d7bf2a6f0\\\" Netns:\\\"/var/run/netns/9407986b-4a31-4b26-b9ae-b8db4869933a\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-55c5844bb-z65ps;K8S_POD_INFRA_CONTAINER_ID=ab73625475eca120464d0809894244ed7690dcbbfcd551305a30c25d7bf2a6f0;K8S_POD_UID=041553f2-67ec-493e-84e3-be25a234f5d6\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-55c5844bb-z65ps] networking: Multus: [openshift-authentication/oauth-openshift-55c5844bb-z65ps/041553f2-67ec-493e-84e3-be25a234f5d6]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: SetNetworkStatus: failed to update the pod oauth-openshift-55c5844bb-z65ps in out of cluster comm: status update failed for pod /: Get \\\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-55c5844bb-z65ps?timeout=1m0s\\\": dial tcp 38.102.83.173:6443: connect: connection refused\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" podUID="041553f2-67ec-493e-84e3-be25a234f5d6"
Nov 28 16:14:46 crc kubenswrapper[4954]: I1128 16:14:46.940405 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0667c8d889acdb014628f9ed7473a07861eadf97a3159a778a8a25b278cc3955"}
Nov 28 16:14:46 crc kubenswrapper[4954]: I1128 16:14:46.940452 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"b2ff866f9f1aecd566cc31631a5e1c12caa82f0b5b34659f3c25d3563a373e68"}
Nov 28 16:14:46 crc kubenswrapper[4954]: I1128 16:14:46.951931 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Nov 28 16:14:46 crc kubenswrapper[4954]: I1128 16:14:46.951974 4954 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464" exitCode=1
Nov 28 16:14:46 crc kubenswrapper[4954]: I1128 16:14:46.952006 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464"}
Nov 28 16:14:46 crc kubenswrapper[4954]: I1128 16:14:46.952460 4954 scope.go:117] "RemoveContainer" containerID="e0d0bcd7dff5d339b6a743dd53e8b52d9a1350a05073ce785893ec2409f08464"
Nov 28 16:14:47 crc kubenswrapper[4954]: I1128 16:14:47.962266 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"32455f4ec63126696792f0dc123d45f73f884e53498b3b83dffd259398f3ce2e"}
Nov 28 16:14:47 crc kubenswrapper[4954]: I1128 16:14:47.962730 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"55876654253cda7b500c1f0cec946ad95d0fe8536ca3d5b6f983bb2aceb9a04f"}
Nov 28 16:14:47 crc kubenswrapper[4954]: I1128 16:14:47.965513 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Nov 28 16:14:47 crc kubenswrapper[4954]: I1128 16:14:47.965619 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"51561487a0d0f74e76f8ce2fba010ff066ca6b769decb5925da475afaf0cfc73"}
Nov 28 16:14:48 crc kubenswrapper[4954]: I1128 16:14:48.975715 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0618be1c13e491b6d9f3ea3a8ac8ea16594ccf1f688824873bf2764a95f0d38f"}
Nov 28 16:14:48 crc kubenswrapper[4954]: I1128 16:14:48.975922 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 16:14:48 crc kubenswrapper[4954]: I1128 16:14:48.976050 4954 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0cfded82-34af-4634-9e29-40a7509efd7e"
Nov 28 16:14:48 crc kubenswrapper[4954]: I1128 16:14:48.976078 4954 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0cfded82-34af-4634-9e29-40a7509efd7e"
Nov 28 16:14:49 crc kubenswrapper[4954]: I1128 16:14:49.873223 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 16:14:49 crc kubenswrapper[4954]: I1128 16:14:49.873493 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 16:14:49 crc kubenswrapper[4954]: I1128 16:14:49.883274 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 16:14:51 crc kubenswrapper[4954]: I1128 16:14:51.196620 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 16:14:51 crc kubenswrapper[4954]: I1128 16:14:51.196739 4954 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body=
Nov 28 16:14:51 crc kubenswrapper[4954]: I1128 16:14:51.197183 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused"
Nov 28 16:14:51 crc kubenswrapper[4954]: I1128 16:14:51.903086 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 16:14:53 crc kubenswrapper[4954]: I1128 16:14:53.995415 4954 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 16:14:54 crc kubenswrapper[4954]: I1128 16:14:54.880063 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 16:14:54 crc kubenswrapper[4954]: I1128 16:14:54.883550 4954 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="9f3af645-f362-45fc-a656-e0d9455d10e7"
Nov 28 16:14:55 crc kubenswrapper[4954]: I1128 16:14:55.011000 4954 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0cfded82-34af-4634-9e29-40a7509efd7e"
Nov 28 16:14:55 crc kubenswrapper[4954]: I1128 16:14:55.011039 4954 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0cfded82-34af-4634-9e29-40a7509efd7e"
Nov 28 16:14:56 crc kubenswrapper[4954]: I1128 16:14:56.018473 4954 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0cfded82-34af-4634-9e29-40a7509efd7e"
Nov 28 16:14:56 crc kubenswrapper[4954]: I1128 16:14:56.018515 4954 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0cfded82-34af-4634-9e29-40a7509efd7e"
Nov 28 16:14:57 crc kubenswrapper[4954]: I1128 16:14:57.875210 4954 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="9f3af645-f362-45fc-a656-e0d9455d10e7"
Nov 28 16:15:01 crc kubenswrapper[4954]: I1128 16:15:01.196449 4954 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body=
Nov 28 16:15:01 crc kubenswrapper[4954]: I1128 16:15:01.196786 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused"
Nov 28 16:15:01 crc kubenswrapper[4954]: I1128 16:15:01.856298 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps"
Nov 28 16:15:01 crc kubenswrapper[4954]: I1128 16:15:01.857392 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps"
Nov 28 16:15:02 crc kubenswrapper[4954]: I1128 16:15:02.320517 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Nov 28 16:15:03 crc kubenswrapper[4954]: I1128 16:15:03.063650 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-55c5844bb-z65ps_041553f2-67ec-493e-84e3-be25a234f5d6/oauth-openshift/0.log"
Nov 28 16:15:03 crc kubenswrapper[4954]: I1128 16:15:03.064261 4954 generic.go:334] "Generic (PLEG): container finished" podID="041553f2-67ec-493e-84e3-be25a234f5d6" containerID="8057a656e739edb54880d8ebfddab65f72e7dad041d429698c086625624ba7f6" exitCode=255
Nov 28 16:15:03 crc kubenswrapper[4954]: I1128 16:15:03.064308 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" event={"ID":"041553f2-67ec-493e-84e3-be25a234f5d6","Type":"ContainerDied","Data":"8057a656e739edb54880d8ebfddab65f72e7dad041d429698c086625624ba7f6"}
Nov 28 16:15:03 crc kubenswrapper[4954]: I1128 16:15:03.064349 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" event={"ID":"041553f2-67ec-493e-84e3-be25a234f5d6","Type":"ContainerStarted","Data":"93472bf5261a1986bafcca52236f71743dff21579fdcfafabb6cbee6e9db51bb"}
Nov 28 16:15:03 crc kubenswrapper[4954]: I1128 16:15:03.065135 4954 scope.go:117] "RemoveContainer" containerID="8057a656e739edb54880d8ebfddab65f72e7dad041d429698c086625624ba7f6"
Nov 28 16:15:03 crc kubenswrapper[4954]: I1128 16:15:03.122033 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps"
Nov 28 16:15:03 crc kubenswrapper[4954]: I1128 16:15:03.122086 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps"
Nov 28 16:15:03 crc kubenswrapper[4954]: I1128 16:15:03.635216 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 28 16:15:04 crc kubenswrapper[4954]: I1128 16:15:04.029973 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 28 16:15:04 crc kubenswrapper[4954]: I1128 16:15:04.075022 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-55c5844bb-z65ps_041553f2-67ec-493e-84e3-be25a234f5d6/oauth-openshift/1.log"
Nov 28 16:15:04 crc kubenswrapper[4954]: I1128 16:15:04.075681 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-55c5844bb-z65ps_041553f2-67ec-493e-84e3-be25a234f5d6/oauth-openshift/0.log"
Nov 28 16:15:04 crc kubenswrapper[4954]: I1128 16:15:04.075763 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" event={"ID":"041553f2-67ec-493e-84e3-be25a234f5d6","Type":"ContainerStarted","Data":"d7a2de84a1b05ca215bf41392fbd9a420d141ff552cbfd727b5f749e832d3c9e"}
Nov 28 16:15:04 crc kubenswrapper[4954]: I1128 16:15:04.076473 4954 scope.go:117] "RemoveContainer" containerID="d7a2de84a1b05ca215bf41392fbd9a420d141ff552cbfd727b5f749e832d3c9e"
Nov 28 16:15:04 crc kubenswrapper[4954]: E1128 16:15:04.076940 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oauth-openshift\" with CrashLoopBackOff: \"back-off 10s restarting failed container=oauth-openshift pod=oauth-openshift-55c5844bb-z65ps_openshift-authentication(041553f2-67ec-493e-84e3-be25a234f5d6)\"" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" podUID="041553f2-67ec-493e-84e3-be25a234f5d6"
Nov 28 16:15:04 crc kubenswrapper[4954]: I1128 16:15:04.444377 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Nov 28 16:15:04 crc kubenswrapper[4954]: I1128 16:15:04.571764 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Nov 28 16:15:04 crc kubenswrapper[4954]: I1128 16:15:04.764776 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 28 16:15:05 crc kubenswrapper[4954]: I1128 16:15:05.084654 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-55c5844bb-z65ps_041553f2-67ec-493e-84e3-be25a234f5d6/oauth-openshift/1.log"
Nov 28 16:15:05 crc kubenswrapper[4954]: I1128 16:15:05.085593 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-55c5844bb-z65ps_041553f2-67ec-493e-84e3-be25a234f5d6/oauth-openshift/0.log"
Nov 28 16:15:05 crc kubenswrapper[4954]: I1128 16:15:05.085652 4954 generic.go:334] "Generic (PLEG): container finished" podID="041553f2-67ec-493e-84e3-be25a234f5d6" containerID="d7a2de84a1b05ca215bf41392fbd9a420d141ff552cbfd727b5f749e832d3c9e" exitCode=255
Nov 28 16:15:05 crc kubenswrapper[4954]: I1128 16:15:05.085693 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" event={"ID":"041553f2-67ec-493e-84e3-be25a234f5d6","Type":"ContainerDied","Data":"d7a2de84a1b05ca215bf41392fbd9a420d141ff552cbfd727b5f749e832d3c9e"}
Nov 28 16:15:05 crc kubenswrapper[4954]: I1128 16:15:05.085733 4954 scope.go:117] "RemoveContainer" containerID="8057a656e739edb54880d8ebfddab65f72e7dad041d429698c086625624ba7f6"
Nov 28 16:15:05 crc kubenswrapper[4954]: I1128 16:15:05.086209 4954 scope.go:117] "RemoveContainer" containerID="d7a2de84a1b05ca215bf41392fbd9a420d141ff552cbfd727b5f749e832d3c9e"
Nov 28 16:15:05 crc kubenswrapper[4954]: E1128 16:15:05.086467 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oauth-openshift\" with CrashLoopBackOff: \"back-off 10s restarting failed container=oauth-openshift pod=oauth-openshift-55c5844bb-z65ps_openshift-authentication(041553f2-67ec-493e-84e3-be25a234f5d6)\"" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" podUID="041553f2-67ec-493e-84e3-be25a234f5d6"
Nov 28 16:15:05 crc kubenswrapper[4954]: I1128 16:15:05.588413 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 28 16:15:05 crc kubenswrapper[4954]: I1128 16:15:05.760347 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Nov 28 16:15:05 crc kubenswrapper[4954]: I1128 16:15:05.828913 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 28 16:15:05 crc kubenswrapper[4954]: I1128 16:15:05.842273 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Nov 28 16:15:05 crc kubenswrapper[4954]: I1128 16:15:05.851296 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 28 16:15:05 crc kubenswrapper[4954]: I1128 16:15:05.922814 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Nov 28 16:15:06 crc kubenswrapper[4954]: I1128 16:15:06.093788 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Nov 28 16:15:06 crc kubenswrapper[4954]: I1128 16:15:06.096361 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-55c5844bb-z65ps_041553f2-67ec-493e-84e3-be25a234f5d6/oauth-openshift/1.log"
Nov 28 16:15:06 crc kubenswrapper[4954]: I1128 16:15:06.097444 4954 scope.go:117] "RemoveContainer" containerID="d7a2de84a1b05ca215bf41392fbd9a420d141ff552cbfd727b5f749e832d3c9e"
Nov 28 16:15:06 crc kubenswrapper[4954]: E1128 16:15:06.097830 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oauth-openshift\" with CrashLoopBackOff: \"back-off 10s restarting failed container=oauth-openshift pod=oauth-openshift-55c5844bb-z65ps_openshift-authentication(041553f2-67ec-493e-84e3-be25a234f5d6)\"" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" podUID="041553f2-67ec-493e-84e3-be25a234f5d6"
Nov 28 16:15:06 crc kubenswrapper[4954]: I1128 16:15:06.140977 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Nov 28 16:15:06 crc kubenswrapper[4954]: I1128 16:15:06.361486 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 28 16:15:06 crc kubenswrapper[4954]: I1128 16:15:06.519386 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 28 16:15:06 crc kubenswrapper[4954]: I1128 16:15:06.545694 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Nov 28 16:15:06 crc kubenswrapper[4954]: I1128 16:15:06.556083 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Nov 28 16:15:06 crc kubenswrapper[4954]: I1128 16:15:06.743501 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Nov 28 16:15:06 crc kubenswrapper[4954]: I1128 16:15:06.923343 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 28 16:15:06 crc kubenswrapper[4954]: I1128 16:15:06.929096 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.009899 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.176978 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.219594 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.282250 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.322868 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.402200 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.419797 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.444345 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.446346 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.466698 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.651405 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.762476 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.827468 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.881035 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.898643 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 28 16:15:07 crc kubenswrapper[4954]: I1128 16:15:07.931378 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Nov 28 16:15:08 crc kubenswrapper[4954]: I1128 16:15:08.008134 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 28 16:15:08 crc kubenswrapper[4954]: I1128 16:15:08.227381 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Nov 28 16:15:08 crc kubenswrapper[4954]: I1128 16:15:08.234969 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 28 16:15:08 crc kubenswrapper[4954]: I1128 16:15:08.415830 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 28 16:15:08 crc kubenswrapper[4954]: I1128 16:15:08.500867 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 28 16:15:08 crc kubenswrapper[4954]: I1128 16:15:08.516875 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 28 16:15:08 crc kubenswrapper[4954]: I1128 16:15:08.625188 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 28 16:15:08 crc kubenswrapper[4954]: I1128 16:15:08.651087 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Nov 28 16:15:08 crc kubenswrapper[4954]: I1128 16:15:08.731453 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Nov 28 16:15:08 crc kubenswrapper[4954]: I1128 16:15:08.747607 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Nov 28 16:15:08 crc kubenswrapper[4954]: I1128 16:15:08.762084 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 28 16:15:08 crc kubenswrapper[4954]: I1128 16:15:08.798566 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Nov 28 16:15:08 crc kubenswrapper[4954]: I1128 16:15:08.966389 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 28 16:15:08 crc kubenswrapper[4954]: I1128 16:15:08.966415 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.080460 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.104138 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.117156 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.136738 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.138391 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.148464 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.171821 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.215746 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.333235 4954 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.334930 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.367367 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.421852 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.594271 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.659828 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.720453 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.789866 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.810302 4954 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.837846 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.854183 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Nov 28 16:15:09 crc kubenswrapper[4954]: I1128 16:15:09.886441 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.079497 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.105107 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.133602 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.200870 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.211009 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.332431 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.476754 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.566114 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.666714 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.704122 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.733590 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.745453 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.758424 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.779869 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.817937 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.824717 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.857467 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.878896 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.895185 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Nov 28 16:15:10 crc kubenswrapper[4954]: I1128 16:15:10.962367 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Nov 28 16:15:11 crc kubenswrapper[4954]: I1128 16:15:11.029242 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Nov 28 16:15:11 crc kubenswrapper[4954]: I1128 16:15:11.088815 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Nov 28 16:15:11 crc kubenswrapper[4954]: I1128 16:15:11.193972 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Nov 28 16:15:11 crc kubenswrapper[4954]: I1128 16:15:11.194357 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 28 16:15:11 crc kubenswrapper[4954]: I1128 16:15:11.200782 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 16:15:11 crc kubenswrapper[4954]: I1128 16:15:11.208397 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 16:15:11 crc kubenswrapper[4954]: I1128 16:15:11.280989 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Nov 28 16:15:11 crc kubenswrapper[4954]: I1128 16:15:11.334136 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Nov 28 16:15:11 crc kubenswrapper[4954]:
I1128 16:15:11.400916 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 28 16:15:11 crc kubenswrapper[4954]: I1128 16:15:11.538053 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 28 16:15:11 crc kubenswrapper[4954]: I1128 16:15:11.574212 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 16:15:11 crc kubenswrapper[4954]: I1128 16:15:11.614601 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 28 16:15:11 crc kubenswrapper[4954]: I1128 16:15:11.664548 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 16:15:11 crc kubenswrapper[4954]: I1128 16:15:11.742040 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 16:15:11 crc kubenswrapper[4954]: I1128 16:15:11.930033 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.000491 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.001368 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.044360 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.088685 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.144123 4954 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.162586 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.165488 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.194474 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.194580 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.203176 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.211360 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.268766 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.310681 
4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.560096 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.577314 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.596990 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.608138 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.705808 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.803210 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.872269 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.920483 4954 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.925747 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-marketplace/community-operators-l49q6"] Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.925823 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.925848 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-55c5844bb-z65ps"] Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.926427 4954 scope.go:117] "RemoveContainer" containerID="d7a2de84a1b05ca215bf41392fbd9a420d141ff552cbfd727b5f749e832d3c9e" Nov 28 16:15:12 crc kubenswrapper[4954]: E1128 16:15:12.926669 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oauth-openshift\" with CrashLoopBackOff: \"back-off 10s restarting failed container=oauth-openshift pod=oauth-openshift-55c5844bb-z65ps_openshift-authentication(041553f2-67ec-493e-84e3-be25a234f5d6)\"" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" podUID="041553f2-67ec-493e-84e3-be25a234f5d6" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.930250 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 16:15:12 crc kubenswrapper[4954]: I1128 16:15:12.945503 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=18.945481752 podStartE2EDuration="18.945481752s" podCreationTimestamp="2025-11-28 16:14:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:15:12.941760478 +0000 UTC m=+266.333429029" 
watchObservedRunningTime="2025-11-28 16:15:12.945481752 +0000 UTC m=+266.337150303" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.093456 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.114917 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.122221 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.122292 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.150126 4954 scope.go:117] "RemoveContainer" containerID="d7a2de84a1b05ca215bf41392fbd9a420d141ff552cbfd727b5f749e832d3c9e" Nov 28 16:15:13 crc kubenswrapper[4954]: E1128 16:15:13.150457 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oauth-openshift\" with CrashLoopBackOff: \"back-off 10s restarting failed container=oauth-openshift pod=oauth-openshift-55c5844bb-z65ps_openshift-authentication(041553f2-67ec-493e-84e3-be25a234f5d6)\"" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" podUID="041553f2-67ec-493e-84e3-be25a234f5d6" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.165693 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.183019 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.222386 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.267057 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.341694 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.369036 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.398461 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.454018 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.464165 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.551200 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.574949 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 28 
16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.651183 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.664558 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.673642 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.715634 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.731654 4954 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.752813 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.769470 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.865079 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" path="/var/lib/kubelet/pods/a6c3823f-faef-4429-b6a0-a9c8678b7358/volumes" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.876559 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.901723 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 16:15:13 crc kubenswrapper[4954]: I1128 16:15:13.972674 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.086244 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.102505 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.161980 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.173300 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.174344 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.182846 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.188213 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 28 16:15:14 crc kubenswrapper[4954]: 
I1128 16:15:14.213587 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.243476 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.264322 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.272837 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.325512 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.437235 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.507259 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.613218 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.652550 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.709171 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.722390 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.723075 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.766116 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.771339 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.777117 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.845621 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.896660 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.949208 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 16:15:14 crc kubenswrapper[4954]: I1128 16:15:14.988867 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 
16:15:15.082131 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.083405 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.171385 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.193911 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.287970 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.301740 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.329608 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.339779 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.381794 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.388453 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.461833 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.557287 4954 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.557934 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://b0381e54669faa5689a088c204eb7a25d1ade159d147fa94368ade9bc9a651b3" gracePeriod=5 Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.679931 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.799274 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.883075 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.926286 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 28 16:15:15 crc kubenswrapper[4954]: I1128 16:15:15.951583 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 28 16:15:16 crc kubenswrapper[4954]: I1128 16:15:16.048077 4954 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 16:15:16 crc kubenswrapper[4954]: I1128 16:15:16.086757 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 16:15:16 crc kubenswrapper[4954]: I1128 16:15:16.129051 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 16:15:16 crc kubenswrapper[4954]: I1128 16:15:16.143912 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 16:15:16 crc kubenswrapper[4954]: I1128 16:15:16.170652 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 28 16:15:16 crc kubenswrapper[4954]: I1128 16:15:16.215230 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 28 16:15:16 crc kubenswrapper[4954]: I1128 16:15:16.245025 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 28 16:15:16 crc kubenswrapper[4954]: I1128 16:15:16.441062 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 28 16:15:16 crc kubenswrapper[4954]: I1128 16:15:16.624986 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 16:15:16 crc kubenswrapper[4954]: I1128 16:15:16.870127 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 28 16:15:16 crc kubenswrapper[4954]: I1128 16:15:16.946512 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 28 16:15:16 crc kubenswrapper[4954]: I1128 16:15:16.949331 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 16:15:16 crc kubenswrapper[4954]: I1128 16:15:16.988395 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 28 16:15:16 crc kubenswrapper[4954]: I1128 16:15:16.991682 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 28 16:15:16 crc kubenswrapper[4954]: I1128 16:15:16.996974 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.006417 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.118763 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.120241 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.148185 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.176430 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 
28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.266639 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.356475 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.370048 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.392667 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.466062 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.536726 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.546931 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.554645 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.592322 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.631514 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.670108 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 28 16:15:17 crc kubenswrapper[4954]: I1128 16:15:17.771259 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 16:15:18 crc kubenswrapper[4954]: I1128 16:15:18.179855 4954 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 28 16:15:18 crc kubenswrapper[4954]: I1128 16:15:18.227474 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 16:15:18 crc kubenswrapper[4954]: I1128 16:15:18.261874 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 28 16:15:18 crc kubenswrapper[4954]: I1128 16:15:18.550804 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 28 16:15:18 crc kubenswrapper[4954]: I1128 16:15:18.585986 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 16:15:18 crc kubenswrapper[4954]: I1128 16:15:18.590142 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 16:15:18 crc kubenswrapper[4954]: I1128 16:15:18.629517 4954 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 28 16:15:18 crc kubenswrapper[4954]: I1128 16:15:18.686912 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 28 16:15:18 crc kubenswrapper[4954]: I1128 16:15:18.702013 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 28 16:15:18 crc kubenswrapper[4954]: I1128 16:15:18.866494 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 28 16:15:18 crc kubenswrapper[4954]: I1128 16:15:18.871650 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 28 16:15:19 crc kubenswrapper[4954]: I1128 16:15:19.124975 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 16:15:19 crc kubenswrapper[4954]: I1128 16:15:19.136707 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 28 16:15:19 crc kubenswrapper[4954]: I1128 16:15:19.197319 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 16:15:19 crc kubenswrapper[4954]: I1128 16:15:19.417094 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 28 16:15:19 crc kubenswrapper[4954]: I1128 16:15:19.452093 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 28 16:15:19 crc kubenswrapper[4954]: I1128 16:15:19.465950 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 16:15:19 crc kubenswrapper[4954]: I1128 16:15:19.668174 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 28 16:15:19 crc kubenswrapper[4954]: I1128 16:15:19.729212 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 16:15:19 crc kubenswrapper[4954]: I1128 16:15:19.833479 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 28 16:15:19 crc kubenswrapper[4954]: I1128 16:15:19.982952 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 28 16:15:20 crc kubenswrapper[4954]: I1128 16:15:20.082464 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 16:15:20 crc kubenswrapper[4954]: I1128 16:15:20.088308 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 28 16:15:20 crc kubenswrapper[4954]: I1128 16:15:20.145906 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 16:15:20 crc kubenswrapper[4954]: I1128 16:15:20.385951 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 28 16:15:20 crc kubenswrapper[4954]: I1128 16:15:20.425946 4954 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 28 16:15:20 crc kubenswrapper[4954]: I1128 16:15:20.554875 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.136683 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.143270 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.143486 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.197584 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.197648 4954 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="b0381e54669faa5689a088c204eb7a25d1ade159d147fa94368ade9bc9a651b3" exitCode=137 Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.197700 4954 scope.go:117] "RemoveContainer" containerID="b0381e54669faa5689a088c204eb7a25d1ade159d147fa94368ade9bc9a651b3" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.197777 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.216311 4954 scope.go:117] "RemoveContainer" containerID="b0381e54669faa5689a088c204eb7a25d1ade159d147fa94368ade9bc9a651b3" Nov 28 16:15:21 crc kubenswrapper[4954]: E1128 16:15:21.216709 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0381e54669faa5689a088c204eb7a25d1ade159d147fa94368ade9bc9a651b3\": container with ID starting with b0381e54669faa5689a088c204eb7a25d1ade159d147fa94368ade9bc9a651b3 not found: ID does not exist" containerID="b0381e54669faa5689a088c204eb7a25d1ade159d147fa94368ade9bc9a651b3" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.216736 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0381e54669faa5689a088c204eb7a25d1ade159d147fa94368ade9bc9a651b3"} err="failed to get container status \"b0381e54669faa5689a088c204eb7a25d1ade159d147fa94368ade9bc9a651b3\": rpc error: code = NotFound desc = could not find container \"b0381e54669faa5689a088c204eb7a25d1ade159d147fa94368ade9bc9a651b3\": container with ID starting with b0381e54669faa5689a088c204eb7a25d1ade159d147fa94368ade9bc9a651b3 not found: ID does not exist" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.308973 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.309106 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.309146 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.309194 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.309285 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.309679 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.309731 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.309765 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.309796 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.318358 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.321970 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.327323 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.411406 4954 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.411999 4954 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.412121 4954 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.412154 4954 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.412181 4954 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:21 crc kubenswrapper[4954]: I1128 16:15:21.865318 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.051490 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5"] Nov 28 16:15:22 crc kubenswrapper[4954]: E1128 16:15:22.051788 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" containerName="extract-utilities" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.051805 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" containerName="extract-utilities" Nov 28 16:15:22 crc kubenswrapper[4954]: E1128 16:15:22.051825 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" containerName="registry-server" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.051834 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" containerName="registry-server" Nov 28 16:15:22 crc kubenswrapper[4954]: E1128 16:15:22.051849 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" containerName="extract-content" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.051858 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" containerName="extract-content" Nov 28 16:15:22 crc kubenswrapper[4954]: E1128 16:15:22.051871 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" 
containerName="startup-monitor" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.051879 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 28 16:15:22 crc kubenswrapper[4954]: E1128 16:15:22.051893 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="988f7288-0247-4444-8d30-71cfa932aa33" containerName="installer" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.051901 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="988f7288-0247-4444-8d30-71cfa932aa33" containerName="installer" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.052037 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="988f7288-0247-4444-8d30-71cfa932aa33" containerName="installer" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.052055 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.052072 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6c3823f-faef-4429-b6a0-a9c8678b7358" containerName="registry-server" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.052490 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.054052 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.054052 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.059013 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5"] Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.221704 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxrkc\" (UniqueName: \"kubernetes.io/projected/5f729fc1-46e1-42a2-8def-3f831c28b8b0-kube-api-access-qxrkc\") pod \"collect-profiles-29405775-5plw5\" (UID: \"5f729fc1-46e1-42a2-8def-3f831c28b8b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.221759 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f729fc1-46e1-42a2-8def-3f831c28b8b0-secret-volume\") pod \"collect-profiles-29405775-5plw5\" (UID: \"5f729fc1-46e1-42a2-8def-3f831c28b8b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.221813 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f729fc1-46e1-42a2-8def-3f831c28b8b0-config-volume\") pod \"collect-profiles-29405775-5plw5\" (UID: \"5f729fc1-46e1-42a2-8def-3f831c28b8b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.323031 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/5f729fc1-46e1-42a2-8def-3f831c28b8b0-secret-volume\") pod \"collect-profiles-29405775-5plw5\" (UID: \"5f729fc1-46e1-42a2-8def-3f831c28b8b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.323144 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f729fc1-46e1-42a2-8def-3f831c28b8b0-config-volume\") pod \"collect-profiles-29405775-5plw5\" (UID: \"5f729fc1-46e1-42a2-8def-3f831c28b8b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.323250 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxrkc\" (UniqueName: \"kubernetes.io/projected/5f729fc1-46e1-42a2-8def-3f831c28b8b0-kube-api-access-qxrkc\") pod \"collect-profiles-29405775-5plw5\" (UID: \"5f729fc1-46e1-42a2-8def-3f831c28b8b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.324340 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f729fc1-46e1-42a2-8def-3f831c28b8b0-config-volume\") pod \"collect-profiles-29405775-5plw5\" (UID: \"5f729fc1-46e1-42a2-8def-3f831c28b8b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.330454 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f729fc1-46e1-42a2-8def-3f831c28b8b0-secret-volume\") pod \"collect-profiles-29405775-5plw5\" (UID: \"5f729fc1-46e1-42a2-8def-3f831c28b8b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.359516 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxrkc\" (UniqueName: \"kubernetes.io/projected/5f729fc1-46e1-42a2-8def-3f831c28b8b0-kube-api-access-qxrkc\") pod \"collect-profiles-29405775-5plw5\" (UID: \"5f729fc1-46e1-42a2-8def-3f831c28b8b0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.367299 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" Nov 28 16:15:22 crc kubenswrapper[4954]: I1128 16:15:22.806327 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5"] Nov 28 16:15:23 crc kubenswrapper[4954]: I1128 16:15:23.210154 4954 generic.go:334] "Generic (PLEG): container finished" podID="5f729fc1-46e1-42a2-8def-3f831c28b8b0" containerID="5191d0199d9849d8b93b9c23f03cfb1f462b0caf1e5d3fbbbc7d7e4d2c739605" exitCode=0 Nov 28 16:15:23 crc kubenswrapper[4954]: I1128 16:15:23.210317 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" event={"ID":"5f729fc1-46e1-42a2-8def-3f831c28b8b0","Type":"ContainerDied","Data":"5191d0199d9849d8b93b9c23f03cfb1f462b0caf1e5d3fbbbc7d7e4d2c739605"} Nov 28 16:15:23 crc kubenswrapper[4954]: I1128 16:15:23.210470 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" event={"ID":"5f729fc1-46e1-42a2-8def-3f831c28b8b0","Type":"ContainerStarted","Data":"760c10343d4c460a7b59c6afcccefc610944e7cc0518d49279d99acd3ec4b191"} Nov 28 16:15:23 crc kubenswrapper[4954]: I1128 16:15:23.855660 4954 scope.go:117] "RemoveContainer" containerID="d7a2de84a1b05ca215bf41392fbd9a420d141ff552cbfd727b5f749e832d3c9e" Nov 28 16:15:24 crc kubenswrapper[4954]: I1128 16:15:24.217566 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-55c5844bb-z65ps_041553f2-67ec-493e-84e3-be25a234f5d6/oauth-openshift/1.log" Nov 28 16:15:24 crc kubenswrapper[4954]: I1128 16:15:24.217985 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" event={"ID":"041553f2-67ec-493e-84e3-be25a234f5d6","Type":"ContainerStarted","Data":"cb7cac9579dfb7dd6cf9a3de2de4f324c562ed027f1438271ed769b437902f12"} Nov 28 16:15:24 crc kubenswrapper[4954]: I1128 16:15:24.218374 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:15:24 crc kubenswrapper[4954]: I1128 16:15:24.239680 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" podStartSLOduration=78.239663203 podStartE2EDuration="1m18.239663203s" podCreationTimestamp="2025-11-28 16:14:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:15:24.237259869 +0000 UTC m=+277.628928420" watchObservedRunningTime="2025-11-28 16:15:24.239663203 +0000 UTC m=+277.631331744" Nov 28 16:15:24 crc kubenswrapper[4954]: I1128 16:15:24.417947 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" Nov 28 16:15:24 crc kubenswrapper[4954]: I1128 16:15:24.549657 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f729fc1-46e1-42a2-8def-3f831c28b8b0-config-volume\") pod \"5f729fc1-46e1-42a2-8def-3f831c28b8b0\" (UID: \"5f729fc1-46e1-42a2-8def-3f831c28b8b0\") " Nov 28 16:15:24 crc kubenswrapper[4954]: I1128 16:15:24.549749 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxrkc\" (UniqueName: \"kubernetes.io/projected/5f729fc1-46e1-42a2-8def-3f831c28b8b0-kube-api-access-qxrkc\") pod \"5f729fc1-46e1-42a2-8def-3f831c28b8b0\" (UID: \"5f729fc1-46e1-42a2-8def-3f831c28b8b0\") " Nov 28 16:15:24 crc kubenswrapper[4954]: I1128 16:15:24.549853 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f729fc1-46e1-42a2-8def-3f831c28b8b0-secret-volume\") pod \"5f729fc1-46e1-42a2-8def-3f831c28b8b0\" (UID: \"5f729fc1-46e1-42a2-8def-3f831c28b8b0\") " Nov 28 16:15:24 crc kubenswrapper[4954]: I1128 16:15:24.550606 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f729fc1-46e1-42a2-8def-3f831c28b8b0-config-volume" (OuterVolumeSpecName: "config-volume") pod "5f729fc1-46e1-42a2-8def-3f831c28b8b0" (UID: "5f729fc1-46e1-42a2-8def-3f831c28b8b0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:24 crc kubenswrapper[4954]: I1128 16:15:24.557912 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f729fc1-46e1-42a2-8def-3f831c28b8b0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5f729fc1-46e1-42a2-8def-3f831c28b8b0" (UID: "5f729fc1-46e1-42a2-8def-3f831c28b8b0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:15:24 crc kubenswrapper[4954]: I1128 16:15:24.558616 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f729fc1-46e1-42a2-8def-3f831c28b8b0-kube-api-access-qxrkc" (OuterVolumeSpecName: "kube-api-access-qxrkc") pod "5f729fc1-46e1-42a2-8def-3f831c28b8b0" (UID: "5f729fc1-46e1-42a2-8def-3f831c28b8b0"). InnerVolumeSpecName "kube-api-access-qxrkc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:15:24 crc kubenswrapper[4954]: I1128 16:15:24.651772 4954 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f729fc1-46e1-42a2-8def-3f831c28b8b0-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:24 crc kubenswrapper[4954]: I1128 16:15:24.651837 4954 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f729fc1-46e1-42a2-8def-3f831c28b8b0-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:24 crc kubenswrapper[4954]: I1128 16:15:24.651859 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxrkc\" (UniqueName: \"kubernetes.io/projected/5f729fc1-46e1-42a2-8def-3f831c28b8b0-kube-api-access-qxrkc\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:24 crc kubenswrapper[4954]: I1128 16:15:24.656514 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-55c5844bb-z65ps" Nov 28 16:15:25 crc kubenswrapper[4954]: I1128 16:15:25.227154 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" Nov 28 16:15:25 crc kubenswrapper[4954]: I1128 16:15:25.227168 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5" event={"ID":"5f729fc1-46e1-42a2-8def-3f831c28b8b0","Type":"ContainerDied","Data":"760c10343d4c460a7b59c6afcccefc610944e7cc0518d49279d99acd3ec4b191"} Nov 28 16:15:25 crc kubenswrapper[4954]: I1128 16:15:25.227247 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="760c10343d4c460a7b59c6afcccefc610944e7cc0518d49279d99acd3ec4b191" Nov 28 16:15:38 crc kubenswrapper[4954]: I1128 16:15:38.930732 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ct8cs"] Nov 28 16:15:38 crc kubenswrapper[4954]: I1128 16:15:38.933668 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ct8cs" podUID="e7ab6809-c0bf-4b18-a7c8-9babc98adec1" containerName="registry-server" containerID="cri-o://58863e7d9af0800ddd854dad3d55bcaa565d7a0a953fe9f1b3aa174439e07a15" gracePeriod=2 Nov 28 16:15:39 crc kubenswrapper[4954]: I1128 16:15:39.137227 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zjx9w"] Nov 28 16:15:39 crc kubenswrapper[4954]: I1128 16:15:39.137687 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zjx9w" podUID="1e1da481-691f-4d64-8229-c8846cd6a778" containerName="registry-server" containerID="cri-o://11f0d06cd920242e5daa274cd34dd62ceba1e157d6beefae783e6c683a337223" gracePeriod=2 Nov 28 16:15:39 crc kubenswrapper[4954]: I1128 16:15:39.322299 4954 generic.go:334] "Generic (PLEG): container finished" podID="e7ab6809-c0bf-4b18-a7c8-9babc98adec1" containerID="58863e7d9af0800ddd854dad3d55bcaa565d7a0a953fe9f1b3aa174439e07a15" exitCode=0 Nov 28 16:15:39 crc kubenswrapper[4954]: I1128 16:15:39.322379 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ct8cs" event={"ID":"e7ab6809-c0bf-4b18-a7c8-9babc98adec1","Type":"ContainerDied","Data":"58863e7d9af0800ddd854dad3d55bcaa565d7a0a953fe9f1b3aa174439e07a15"} Nov 28 16:15:39 crc kubenswrapper[4954]: I1128 
16:15:39.325300 4954 generic.go:334] "Generic (PLEG): container finished" podID="1e1da481-691f-4d64-8229-c8846cd6a778" containerID="11f0d06cd920242e5daa274cd34dd62ceba1e157d6beefae783e6c683a337223" exitCode=0 Nov 28 16:15:39 crc kubenswrapper[4954]: I1128 16:15:39.325359 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zjx9w" event={"ID":"1e1da481-691f-4d64-8229-c8846cd6a778","Type":"ContainerDied","Data":"11f0d06cd920242e5daa274cd34dd62ceba1e157d6beefae783e6c683a337223"} Nov 28 16:15:39 crc kubenswrapper[4954]: I1128 16:15:39.796292 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:15:39 crc kubenswrapper[4954]: I1128 16:15:39.981951 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-utilities\") pod \"e7ab6809-c0bf-4b18-a7c8-9babc98adec1\" (UID: \"e7ab6809-c0bf-4b18-a7c8-9babc98adec1\") " Nov 28 16:15:39 crc kubenswrapper[4954]: I1128 16:15:39.982003 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xjznp\" (UniqueName: \"kubernetes.io/projected/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-kube-api-access-xjznp\") pod \"e7ab6809-c0bf-4b18-a7c8-9babc98adec1\" (UID: \"e7ab6809-c0bf-4b18-a7c8-9babc98adec1\") " Nov 28 16:15:39 crc kubenswrapper[4954]: I1128 16:15:39.982044 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-catalog-content\") pod \"e7ab6809-c0bf-4b18-a7c8-9babc98adec1\" (UID: \"e7ab6809-c0bf-4b18-a7c8-9babc98adec1\") " Nov 28 16:15:39 crc kubenswrapper[4954]: I1128 16:15:39.983151 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-utilities" (OuterVolumeSpecName: "utilities") pod "e7ab6809-c0bf-4b18-a7c8-9babc98adec1" (UID: "e7ab6809-c0bf-4b18-a7c8-9babc98adec1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:15:39 crc kubenswrapper[4954]: I1128 16:15:39.989419 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:15:39 crc kubenswrapper[4954]: I1128 16:15:39.992294 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-kube-api-access-xjznp" (OuterVolumeSpecName: "kube-api-access-xjznp") pod "e7ab6809-c0bf-4b18-a7c8-9babc98adec1" (UID: "e7ab6809-c0bf-4b18-a7c8-9babc98adec1"). InnerVolumeSpecName "kube-api-access-xjznp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.003447 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e7ab6809-c0bf-4b18-a7c8-9babc98adec1" (UID: "e7ab6809-c0bf-4b18-a7c8-9babc98adec1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.083432 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.083479 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xjznp\" (UniqueName: \"kubernetes.io/projected/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-kube-api-access-xjznp\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.083498 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7ab6809-c0bf-4b18-a7c8-9babc98adec1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.184450 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x6w7r\" (UniqueName: \"kubernetes.io/projected/1e1da481-691f-4d64-8229-c8846cd6a778-kube-api-access-x6w7r\") pod \"1e1da481-691f-4d64-8229-c8846cd6a778\" (UID: \"1e1da481-691f-4d64-8229-c8846cd6a778\") " Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.184559 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e1da481-691f-4d64-8229-c8846cd6a778-catalog-content\") pod \"1e1da481-691f-4d64-8229-c8846cd6a778\" (UID: \"1e1da481-691f-4d64-8229-c8846cd6a778\") " Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.184757 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e1da481-691f-4d64-8229-c8846cd6a778-utilities\") pod \"1e1da481-691f-4d64-8229-c8846cd6a778\" (UID: \"1e1da481-691f-4d64-8229-c8846cd6a778\") " Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.185806 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e1da481-691f-4d64-8229-c8846cd6a778-utilities" (OuterVolumeSpecName: "utilities") pod "1e1da481-691f-4d64-8229-c8846cd6a778" (UID: "1e1da481-691f-4d64-8229-c8846cd6a778"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.187782 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e1da481-691f-4d64-8229-c8846cd6a778-kube-api-access-x6w7r" (OuterVolumeSpecName: "kube-api-access-x6w7r") pod "1e1da481-691f-4d64-8229-c8846cd6a778" (UID: "1e1da481-691f-4d64-8229-c8846cd6a778"). InnerVolumeSpecName "kube-api-access-x6w7r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.286256 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e1da481-691f-4d64-8229-c8846cd6a778-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.286316 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6w7r\" (UniqueName: \"kubernetes.io/projected/1e1da481-691f-4d64-8229-c8846cd6a778-kube-api-access-x6w7r\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.298497 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e1da481-691f-4d64-8229-c8846cd6a778-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1e1da481-691f-4d64-8229-c8846cd6a778" (UID: "1e1da481-691f-4d64-8229-c8846cd6a778"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.333206 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zjx9w" event={"ID":"1e1da481-691f-4d64-8229-c8846cd6a778","Type":"ContainerDied","Data":"643f161019ecceee86fc83812fff68c0bef9b8941ca07b7189ec877706aa4268"} Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.333353 4954 scope.go:117] "RemoveContainer" containerID="11f0d06cd920242e5daa274cd34dd62ceba1e157d6beefae783e6c683a337223" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.333510 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zjx9w" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.341051 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ct8cs" event={"ID":"e7ab6809-c0bf-4b18-a7c8-9babc98adec1","Type":"ContainerDied","Data":"e70de9f0cba66dfbf16ee743342c8cfed72916e09eedbe79a6aec81e4107c769"} Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.341106 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ct8cs" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.358458 4954 scope.go:117] "RemoveContainer" containerID="2f40e73ac78483ad22c7b5025f36722644fb381a64ed62ea99017ec9129a1c20" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.373730 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zjx9w"] Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.381087 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zjx9w"] Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.385454 4954 scope.go:117] "RemoveContainer" containerID="a9047fec4c7dbc09be5959d7fe64d645904028c7a63e0971d260bb088618c586" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.386782 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ct8cs"] Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.387457 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e1da481-691f-4d64-8229-c8846cd6a778-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.392238 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ct8cs"] Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.405472 4954 scope.go:117] "RemoveContainer" containerID="58863e7d9af0800ddd854dad3d55bcaa565d7a0a953fe9f1b3aa174439e07a15" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.425366 4954 scope.go:117] "RemoveContainer" containerID="026bcb830696df0a2081e3bd94d8571f5d5e16178e4ddce960a57a9d46f39ce1" Nov 28 16:15:40 crc kubenswrapper[4954]: I1128 16:15:40.439991 4954 scope.go:117] "RemoveContainer" containerID="20dffeeb4ab7a48399ac640962fcdcc2e09a5c42a6f687d4559485df5c7dc954" Nov 28 16:15:41 crc kubenswrapper[4954]: I1128 16:15:41.862757 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e1da481-691f-4d64-8229-c8846cd6a778" path="/var/lib/kubelet/pods/1e1da481-691f-4d64-8229-c8846cd6a778/volumes" Nov 28 16:15:41 crc kubenswrapper[4954]: I1128 16:15:41.863348 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7ab6809-c0bf-4b18-a7c8-9babc98adec1" path="/var/lib/kubelet/pods/e7ab6809-c0bf-4b18-a7c8-9babc98adec1/volumes" Nov 28 16:15:43 crc kubenswrapper[4954]: I1128 16:15:43.966080 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2ckdw"] Nov 28 16:15:43 crc kubenswrapper[4954]: I1128 16:15:43.967215 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" podUID="76c97c39-16a9-4231-ade3-903f2eb8b4be" containerName="controller-manager" containerID="cri-o://fb88fdfb0ee734d4683587f69f3ea731ea17b6e739b670bec69ec99a2c40f145" gracePeriod=30 Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.064009 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj"] Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.064206 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" podUID="db28b581-1553-4c72-a11f-fb4c3d67a33f" containerName="route-controller-manager" 
containerID="cri-o://8e2ee78f80be5f739a13736ea894733f637c253ff89974f55cfb180b3da3ce0b" gracePeriod=30 Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.365827 4954 generic.go:334] "Generic (PLEG): container finished" podID="db28b581-1553-4c72-a11f-fb4c3d67a33f" containerID="8e2ee78f80be5f739a13736ea894733f637c253ff89974f55cfb180b3da3ce0b" exitCode=0 Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.366105 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" event={"ID":"db28b581-1553-4c72-a11f-fb4c3d67a33f","Type":"ContainerDied","Data":"8e2ee78f80be5f739a13736ea894733f637c253ff89974f55cfb180b3da3ce0b"} Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.378132 4954 generic.go:334] "Generic (PLEG): container finished" podID="76c97c39-16a9-4231-ade3-903f2eb8b4be" containerID="fb88fdfb0ee734d4683587f69f3ea731ea17b6e739b670bec69ec99a2c40f145" exitCode=0 Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.378176 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" event={"ID":"76c97c39-16a9-4231-ade3-903f2eb8b4be","Type":"ContainerDied","Data":"fb88fdfb0ee734d4683587f69f3ea731ea17b6e739b670bec69ec99a2c40f145"} Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.447470 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.543165 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.550920 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-config\") pod \"76c97c39-16a9-4231-ade3-903f2eb8b4be\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.550997 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db28b581-1553-4c72-a11f-fb4c3d67a33f-config\") pod \"db28b581-1553-4c72-a11f-fb4c3d67a33f\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.551026 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-client-ca\") pod \"76c97c39-16a9-4231-ade3-903f2eb8b4be\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.551076 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlphn\" (UniqueName: \"kubernetes.io/projected/db28b581-1553-4c72-a11f-fb4c3d67a33f-kube-api-access-nlphn\") pod \"db28b581-1553-4c72-a11f-fb4c3d67a33f\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.551112 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-proxy-ca-bundles\") pod \"76c97c39-16a9-4231-ade3-903f2eb8b4be\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.551155 
4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/76c97c39-16a9-4231-ade3-903f2eb8b4be-serving-cert\") pod \"76c97c39-16a9-4231-ade3-903f2eb8b4be\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.551179 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db28b581-1553-4c72-a11f-fb4c3d67a33f-serving-cert\") pod \"db28b581-1553-4c72-a11f-fb4c3d67a33f\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.551247 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/db28b581-1553-4c72-a11f-fb4c3d67a33f-client-ca\") pod \"db28b581-1553-4c72-a11f-fb4c3d67a33f\" (UID: \"db28b581-1553-4c72-a11f-fb4c3d67a33f\") " Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.551324 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qllgz\" (UniqueName: \"kubernetes.io/projected/76c97c39-16a9-4231-ade3-903f2eb8b4be-kube-api-access-qllgz\") pod \"76c97c39-16a9-4231-ade3-903f2eb8b4be\" (UID: \"76c97c39-16a9-4231-ade3-903f2eb8b4be\") " Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.554556 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db28b581-1553-4c72-a11f-fb4c3d67a33f-config" (OuterVolumeSpecName: "config") pod "db28b581-1553-4c72-a11f-fb4c3d67a33f" (UID: "db28b581-1553-4c72-a11f-fb4c3d67a33f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.558494 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-config" (OuterVolumeSpecName: "config") pod "76c97c39-16a9-4231-ade3-903f2eb8b4be" (UID: "76c97c39-16a9-4231-ade3-903f2eb8b4be"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.560683 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db28b581-1553-4c72-a11f-fb4c3d67a33f-kube-api-access-nlphn" (OuterVolumeSpecName: "kube-api-access-nlphn") pod "db28b581-1553-4c72-a11f-fb4c3d67a33f" (UID: "db28b581-1553-4c72-a11f-fb4c3d67a33f"). InnerVolumeSpecName "kube-api-access-nlphn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.561999 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-client-ca" (OuterVolumeSpecName: "client-ca") pod "76c97c39-16a9-4231-ade3-903f2eb8b4be" (UID: "76c97c39-16a9-4231-ade3-903f2eb8b4be"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.562221 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76c97c39-16a9-4231-ade3-903f2eb8b4be-kube-api-access-qllgz" (OuterVolumeSpecName: "kube-api-access-qllgz") pod "76c97c39-16a9-4231-ade3-903f2eb8b4be" (UID: "76c97c39-16a9-4231-ade3-903f2eb8b4be"). InnerVolumeSpecName "kube-api-access-qllgz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.563012 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "76c97c39-16a9-4231-ade3-903f2eb8b4be" (UID: "76c97c39-16a9-4231-ade3-903f2eb8b4be"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.563277 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db28b581-1553-4c72-a11f-fb4c3d67a33f-client-ca" (OuterVolumeSpecName: "client-ca") pod "db28b581-1553-4c72-a11f-fb4c3d67a33f" (UID: "db28b581-1553-4c72-a11f-fb4c3d67a33f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.563468 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76c97c39-16a9-4231-ade3-903f2eb8b4be-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "76c97c39-16a9-4231-ade3-903f2eb8b4be" (UID: "76c97c39-16a9-4231-ade3-903f2eb8b4be"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.568813 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db28b581-1553-4c72-a11f-fb4c3d67a33f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "db28b581-1553-4c72-a11f-fb4c3d67a33f" (UID: "db28b581-1553-4c72-a11f-fb4c3d67a33f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.653154 4954 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/db28b581-1553-4c72-a11f-fb4c3d67a33f-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.653195 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qllgz\" (UniqueName: \"kubernetes.io/projected/76c97c39-16a9-4231-ade3-903f2eb8b4be-kube-api-access-qllgz\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.653207 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.653216 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db28b581-1553-4c72-a11f-fb4c3d67a33f-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.653227 4954 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.653236 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlphn\" (UniqueName: \"kubernetes.io/projected/db28b581-1553-4c72-a11f-fb4c3d67a33f-kube-api-access-nlphn\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.653244 4954 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/76c97c39-16a9-4231-ade3-903f2eb8b4be-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.653252 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/76c97c39-16a9-4231-ade3-903f2eb8b4be-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:44 crc kubenswrapper[4954]: I1128 16:15:44.653260 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db28b581-1553-4c72-a11f-fb4c3d67a33f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.379780 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-55bf64c844-k7qkf"] Nov 28 16:15:45 crc kubenswrapper[4954]: E1128 16:15:45.379971 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7ab6809-c0bf-4b18-a7c8-9babc98adec1" containerName="extract-content" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.379983 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7ab6809-c0bf-4b18-a7c8-9babc98adec1" containerName="extract-content" Nov 28 16:15:45 crc kubenswrapper[4954]: E1128 16:15:45.379994 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f729fc1-46e1-42a2-8def-3f831c28b8b0" containerName="collect-profiles" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.380000 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f729fc1-46e1-42a2-8def-3f831c28b8b0" containerName="collect-profiles" Nov 28 16:15:45 crc kubenswrapper[4954]: E1128 16:15:45.380008 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e1da481-691f-4d64-8229-c8846cd6a778" containerName="extract-content" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.380014 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e1da481-691f-4d64-8229-c8846cd6a778" containerName="extract-content" Nov 28 16:15:45 crc kubenswrapper[4954]: E1128 16:15:45.380024 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e1da481-691f-4d64-8229-c8846cd6a778" containerName="registry-server" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.380030 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e1da481-691f-4d64-8229-c8846cd6a778" containerName="registry-server" Nov 28 16:15:45 crc kubenswrapper[4954]: E1128 16:15:45.380039 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db28b581-1553-4c72-a11f-fb4c3d67a33f" containerName="route-controller-manager" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.380044 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="db28b581-1553-4c72-a11f-fb4c3d67a33f" containerName="route-controller-manager" Nov 28 16:15:45 crc kubenswrapper[4954]: E1128 16:15:45.380052 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76c97c39-16a9-4231-ade3-903f2eb8b4be" containerName="controller-manager" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.380057 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="76c97c39-16a9-4231-ade3-903f2eb8b4be" containerName="controller-manager" Nov 28 16:15:45 crc kubenswrapper[4954]: E1128 16:15:45.380066 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7ab6809-c0bf-4b18-a7c8-9babc98adec1" containerName="registry-server" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.380072 4954 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="e7ab6809-c0bf-4b18-a7c8-9babc98adec1" containerName="registry-server" Nov 28 16:15:45 crc kubenswrapper[4954]: E1128 16:15:45.380080 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7ab6809-c0bf-4b18-a7c8-9babc98adec1" containerName="extract-utilities" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.380087 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7ab6809-c0bf-4b18-a7c8-9babc98adec1" containerName="extract-utilities" Nov 28 16:15:45 crc kubenswrapper[4954]: E1128 16:15:45.380096 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e1da481-691f-4d64-8229-c8846cd6a778" containerName="extract-utilities" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.380101 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e1da481-691f-4d64-8229-c8846cd6a778" containerName="extract-utilities" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.380181 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f729fc1-46e1-42a2-8def-3f831c28b8b0" containerName="collect-profiles" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.380191 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="db28b581-1553-4c72-a11f-fb4c3d67a33f" containerName="route-controller-manager" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.380199 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7ab6809-c0bf-4b18-a7c8-9babc98adec1" containerName="registry-server" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.380208 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="76c97c39-16a9-4231-ade3-903f2eb8b4be" containerName="controller-manager" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.380216 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e1da481-691f-4d64-8229-c8846cd6a778" containerName="registry-server" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.380548 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.386435 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" event={"ID":"db28b581-1553-4c72-a11f-fb4c3d67a33f","Type":"ContainerDied","Data":"bdf99ee384473f946ab3985744836582b11bc412da204e519c3ed245a0e50780"} Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.386478 4954 scope.go:117] "RemoveContainer" containerID="8e2ee78f80be5f739a13736ea894733f637c253ff89974f55cfb180b3da3ce0b" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.386596 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.388111 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" event={"ID":"76c97c39-16a9-4231-ade3-903f2eb8b4be","Type":"ContainerDied","Data":"35b701abcbc31d4e57bfce215793cd6b17204e941b68e028a8e2869ba40423d2"} Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.388135 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2ckdw" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.400174 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-55bf64c844-k7qkf"] Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.409835 4954 scope.go:117] "RemoveContainer" containerID="fb88fdfb0ee734d4683587f69f3ea731ea17b6e739b670bec69ec99a2c40f145" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.421845 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p"] Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.422707 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.436165 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.436349 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.440992 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.442096 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.442316 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.446563 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.453591 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p"] Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.462423 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-proxy-ca-bundles\") pod \"controller-manager-55bf64c844-k7qkf\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.462476 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-client-ca\") pod \"route-controller-manager-56b6ff4c67-lw44p\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.462504 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-serving-cert\") pod \"route-controller-manager-56b6ff4c67-lw44p\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " 
pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.462665 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/322cd57c-f334-48bd-ae4b-d949494360ee-serving-cert\") pod \"controller-manager-55bf64c844-k7qkf\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.462787 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-client-ca\") pod \"controller-manager-55bf64c844-k7qkf\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.462896 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ljqs\" (UniqueName: \"kubernetes.io/projected/322cd57c-f334-48bd-ae4b-d949494360ee-kube-api-access-7ljqs\") pod \"controller-manager-55bf64c844-k7qkf\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.462940 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-config\") pod \"route-controller-manager-56b6ff4c67-lw44p\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.463013 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnwjq\" (UniqueName: \"kubernetes.io/projected/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-kube-api-access-jnwjq\") pod \"route-controller-manager-56b6ff4c67-lw44p\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.463107 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-config\") pod \"controller-manager-55bf64c844-k7qkf\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.524614 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2ckdw"] Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.537140 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2ckdw"] Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.550384 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj"] Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.559002 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-bdctj"] Nov 28 16:15:45 crc 
kubenswrapper[4954]: I1128 16:15:45.564604 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-proxy-ca-bundles\") pod \"controller-manager-55bf64c844-k7qkf\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.564668 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-client-ca\") pod \"route-controller-manager-56b6ff4c67-lw44p\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.564697 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-serving-cert\") pod \"route-controller-manager-56b6ff4c67-lw44p\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.564729 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/322cd57c-f334-48bd-ae4b-d949494360ee-serving-cert\") pod \"controller-manager-55bf64c844-k7qkf\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.564761 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-client-ca\") pod \"controller-manager-55bf64c844-k7qkf\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.564807 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ljqs\" (UniqueName: \"kubernetes.io/projected/322cd57c-f334-48bd-ae4b-d949494360ee-kube-api-access-7ljqs\") pod \"controller-manager-55bf64c844-k7qkf\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.564836 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-config\") pod \"route-controller-manager-56b6ff4c67-lw44p\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.564869 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnwjq\" (UniqueName: \"kubernetes.io/projected/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-kube-api-access-jnwjq\") pod \"route-controller-manager-56b6ff4c67-lw44p\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.564910 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-config\") pod \"controller-manager-55bf64c844-k7qkf\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.566341 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-proxy-ca-bundles\") pod \"controller-manager-55bf64c844-k7qkf\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.566486 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-config\") pod \"controller-manager-55bf64c844-k7qkf\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.567041 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-client-ca\") pod \"controller-manager-55bf64c844-k7qkf\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.567633 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-client-ca\") pod \"route-controller-manager-56b6ff4c67-lw44p\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.567872 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-config\") pod \"route-controller-manager-56b6ff4c67-lw44p\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.572853 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/322cd57c-f334-48bd-ae4b-d949494360ee-serving-cert\") pod \"controller-manager-55bf64c844-k7qkf\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.573407 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-serving-cert\") pod \"route-controller-manager-56b6ff4c67-lw44p\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.602121 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ljqs\" (UniqueName: \"kubernetes.io/projected/322cd57c-f334-48bd-ae4b-d949494360ee-kube-api-access-7ljqs\") pod \"controller-manager-55bf64c844-k7qkf\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " 
pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.605833 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnwjq\" (UniqueName: \"kubernetes.io/projected/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-kube-api-access-jnwjq\") pod \"route-controller-manager-56b6ff4c67-lw44p\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.714172 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.747111 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.867474 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76c97c39-16a9-4231-ade3-903f2eb8b4be" path="/var/lib/kubelet/pods/76c97c39-16a9-4231-ade3-903f2eb8b4be/volumes" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.867977 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db28b581-1553-4c72-a11f-fb4c3d67a33f" path="/var/lib/kubelet/pods/db28b581-1553-4c72-a11f-fb4c3d67a33f/volumes" Nov 28 16:15:45 crc kubenswrapper[4954]: I1128 16:15:45.950851 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-55bf64c844-k7qkf"] Nov 28 16:15:46 crc kubenswrapper[4954]: I1128 16:15:46.239599 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p"] Nov 28 16:15:46 crc kubenswrapper[4954]: I1128 16:15:46.399637 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" event={"ID":"322cd57c-f334-48bd-ae4b-d949494360ee","Type":"ContainerStarted","Data":"7a5e655dce7f3011c3b75836fc0f19c8b568ff6cda14d9d251b86fab77629bf5"} Nov 28 16:15:46 crc kubenswrapper[4954]: I1128 16:15:46.399690 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" event={"ID":"322cd57c-f334-48bd-ae4b-d949494360ee","Type":"ContainerStarted","Data":"53b1415e6619019aed054d6469c3da5535568333e7d4204aea91994460d5aac1"} Nov 28 16:15:46 crc kubenswrapper[4954]: I1128 16:15:46.400051 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:46 crc kubenswrapper[4954]: I1128 16:15:46.402462 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" event={"ID":"e3564534-68d8-4e2e-bfb5-fbe02af59b5e","Type":"ContainerStarted","Data":"51300befaf712199c18596de3922ead00d3bf57854b3f0374c51c228ec5ce419"} Nov 28 16:15:46 crc kubenswrapper[4954]: I1128 16:15:46.402504 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" event={"ID":"e3564534-68d8-4e2e-bfb5-fbe02af59b5e","Type":"ContainerStarted","Data":"b132e02421930340c8946ec9918a8610bc9737794771cc7faa2f833bfaa24f84"} Nov 28 16:15:46 crc kubenswrapper[4954]: I1128 16:15:46.402743 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:46 crc kubenswrapper[4954]: I1128 16:15:46.403395 4954 patch_prober.go:28] interesting pod/route-controller-manager-56b6ff4c67-lw44p container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Nov 28 16:15:46 crc kubenswrapper[4954]: I1128 16:15:46.403436 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" podUID="e3564534-68d8-4e2e-bfb5-fbe02af59b5e" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" Nov 28 16:15:46 crc kubenswrapper[4954]: I1128 16:15:46.419514 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" podStartSLOduration=1.419497477 podStartE2EDuration="1.419497477s" podCreationTimestamp="2025-11-28 16:15:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:15:46.418330981 +0000 UTC m=+299.809999522" watchObservedRunningTime="2025-11-28 16:15:46.419497477 +0000 UTC m=+299.811166008" Nov 28 16:15:46 crc kubenswrapper[4954]: I1128 16:15:46.430394 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" podStartSLOduration=1.4303785439999999 podStartE2EDuration="1.430378544s" podCreationTimestamp="2025-11-28 16:15:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:15:46.429516568 +0000 UTC m=+299.821185109" watchObservedRunningTime="2025-11-28 16:15:46.430378544 +0000 UTC m=+299.822047085" Nov 28 16:15:46 crc kubenswrapper[4954]: I1128 16:15:46.436643 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:47 crc kubenswrapper[4954]: I1128 16:15:47.419607 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:49 crc kubenswrapper[4954]: I1128 16:15:49.542396 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-55bf64c844-k7qkf"] Nov 28 16:15:49 crc kubenswrapper[4954]: I1128 16:15:49.543664 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" podUID="322cd57c-f334-48bd-ae4b-d949494360ee" containerName="controller-manager" containerID="cri-o://7a5e655dce7f3011c3b75836fc0f19c8b568ff6cda14d9d251b86fab77629bf5" gracePeriod=30 Nov 28 16:15:49 crc kubenswrapper[4954]: I1128 16:15:49.557291 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p"] Nov 28 16:15:49 crc kubenswrapper[4954]: I1128 16:15:49.557487 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" 
podUID="e3564534-68d8-4e2e-bfb5-fbe02af59b5e" containerName="route-controller-manager" containerID="cri-o://51300befaf712199c18596de3922ead00d3bf57854b3f0374c51c228ec5ce419" gracePeriod=30 Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.053868 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.059659 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.128641 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-proxy-ca-bundles\") pod \"322cd57c-f334-48bd-ae4b-d949494360ee\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.128883 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-serving-cert\") pod \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.128937 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-config\") pod \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.128960 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7ljqs\" (UniqueName: \"kubernetes.io/projected/322cd57c-f334-48bd-ae4b-d949494360ee-kube-api-access-7ljqs\") pod \"322cd57c-f334-48bd-ae4b-d949494360ee\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.129006 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/322cd57c-f334-48bd-ae4b-d949494360ee-serving-cert\") pod \"322cd57c-f334-48bd-ae4b-d949494360ee\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.129065 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jnwjq\" (UniqueName: \"kubernetes.io/projected/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-kube-api-access-jnwjq\") pod \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.129089 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-config\") pod \"322cd57c-f334-48bd-ae4b-d949494360ee\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.129145 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-client-ca\") pod \"322cd57c-f334-48bd-ae4b-d949494360ee\" (UID: \"322cd57c-f334-48bd-ae4b-d949494360ee\") " Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.129172 4954 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-client-ca\") pod \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\" (UID: \"e3564534-68d8-4e2e-bfb5-fbe02af59b5e\") " Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.129762 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-config" (OuterVolumeSpecName: "config") pod "322cd57c-f334-48bd-ae4b-d949494360ee" (UID: "322cd57c-f334-48bd-ae4b-d949494360ee"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.129898 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "322cd57c-f334-48bd-ae4b-d949494360ee" (UID: "322cd57c-f334-48bd-ae4b-d949494360ee"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.129938 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-config" (OuterVolumeSpecName: "config") pod "e3564534-68d8-4e2e-bfb5-fbe02af59b5e" (UID: "e3564534-68d8-4e2e-bfb5-fbe02af59b5e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.130097 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-client-ca" (OuterVolumeSpecName: "client-ca") pod "e3564534-68d8-4e2e-bfb5-fbe02af59b5e" (UID: "e3564534-68d8-4e2e-bfb5-fbe02af59b5e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.130369 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-client-ca" (OuterVolumeSpecName: "client-ca") pod "322cd57c-f334-48bd-ae4b-d949494360ee" (UID: "322cd57c-f334-48bd-ae4b-d949494360ee"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.135428 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/322cd57c-f334-48bd-ae4b-d949494360ee-kube-api-access-7ljqs" (OuterVolumeSpecName: "kube-api-access-7ljqs") pod "322cd57c-f334-48bd-ae4b-d949494360ee" (UID: "322cd57c-f334-48bd-ae4b-d949494360ee"). InnerVolumeSpecName "kube-api-access-7ljqs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.137634 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-kube-api-access-jnwjq" (OuterVolumeSpecName: "kube-api-access-jnwjq") pod "e3564534-68d8-4e2e-bfb5-fbe02af59b5e" (UID: "e3564534-68d8-4e2e-bfb5-fbe02af59b5e"). InnerVolumeSpecName "kube-api-access-jnwjq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.137742 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/322cd57c-f334-48bd-ae4b-d949494360ee-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "322cd57c-f334-48bd-ae4b-d949494360ee" (UID: "322cd57c-f334-48bd-ae4b-d949494360ee"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.137785 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e3564534-68d8-4e2e-bfb5-fbe02af59b5e" (UID: "e3564534-68d8-4e2e-bfb5-fbe02af59b5e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.231344 4954 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.231395 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.231421 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.231442 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7ljqs\" (UniqueName: \"kubernetes.io/projected/322cd57c-f334-48bd-ae4b-d949494360ee-kube-api-access-7ljqs\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.231463 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/322cd57c-f334-48bd-ae4b-d949494360ee-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.231482 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.231499 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jnwjq\" (UniqueName: \"kubernetes.io/projected/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-kube-api-access-jnwjq\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.231517 4954 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/322cd57c-f334-48bd-ae4b-d949494360ee-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.231558 4954 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e3564534-68d8-4e2e-bfb5-fbe02af59b5e-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.429845 4954 generic.go:334] "Generic (PLEG): container finished" podID="322cd57c-f334-48bd-ae4b-d949494360ee" containerID="7a5e655dce7f3011c3b75836fc0f19c8b568ff6cda14d9d251b86fab77629bf5" exitCode=0 Nov 
28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.429959 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.430042 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" event={"ID":"322cd57c-f334-48bd-ae4b-d949494360ee","Type":"ContainerDied","Data":"7a5e655dce7f3011c3b75836fc0f19c8b568ff6cda14d9d251b86fab77629bf5"} Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.430087 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55bf64c844-k7qkf" event={"ID":"322cd57c-f334-48bd-ae4b-d949494360ee","Type":"ContainerDied","Data":"53b1415e6619019aed054d6469c3da5535568333e7d4204aea91994460d5aac1"} Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.430116 4954 scope.go:117] "RemoveContainer" containerID="7a5e655dce7f3011c3b75836fc0f19c8b568ff6cda14d9d251b86fab77629bf5" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.433110 4954 generic.go:334] "Generic (PLEG): container finished" podID="e3564534-68d8-4e2e-bfb5-fbe02af59b5e" containerID="51300befaf712199c18596de3922ead00d3bf57854b3f0374c51c228ec5ce419" exitCode=0 Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.433200 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" event={"ID":"e3564534-68d8-4e2e-bfb5-fbe02af59b5e","Type":"ContainerDied","Data":"51300befaf712199c18596de3922ead00d3bf57854b3f0374c51c228ec5ce419"} Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.433255 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" event={"ID":"e3564534-68d8-4e2e-bfb5-fbe02af59b5e","Type":"ContainerDied","Data":"b132e02421930340c8946ec9918a8610bc9737794771cc7faa2f833bfaa24f84"} Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.433350 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.464707 4954 scope.go:117] "RemoveContainer" containerID="7a5e655dce7f3011c3b75836fc0f19c8b568ff6cda14d9d251b86fab77629bf5" Nov 28 16:15:50 crc kubenswrapper[4954]: E1128 16:15:50.466069 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a5e655dce7f3011c3b75836fc0f19c8b568ff6cda14d9d251b86fab77629bf5\": container with ID starting with 7a5e655dce7f3011c3b75836fc0f19c8b568ff6cda14d9d251b86fab77629bf5 not found: ID does not exist" containerID="7a5e655dce7f3011c3b75836fc0f19c8b568ff6cda14d9d251b86fab77629bf5" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.466140 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a5e655dce7f3011c3b75836fc0f19c8b568ff6cda14d9d251b86fab77629bf5"} err="failed to get container status \"7a5e655dce7f3011c3b75836fc0f19c8b568ff6cda14d9d251b86fab77629bf5\": rpc error: code = NotFound desc = could not find container \"7a5e655dce7f3011c3b75836fc0f19c8b568ff6cda14d9d251b86fab77629bf5\": container with ID starting with 7a5e655dce7f3011c3b75836fc0f19c8b568ff6cda14d9d251b86fab77629bf5 not found: ID does not exist" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.466214 4954 scope.go:117] "RemoveContainer" containerID="51300befaf712199c18596de3922ead00d3bf57854b3f0374c51c228ec5ce419" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.489864 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p"] Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.496602 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-56b6ff4c67-lw44p"] Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.501997 4954 scope.go:117] "RemoveContainer" containerID="51300befaf712199c18596de3922ead00d3bf57854b3f0374c51c228ec5ce419" Nov 28 16:15:50 crc kubenswrapper[4954]: E1128 16:15:50.503967 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51300befaf712199c18596de3922ead00d3bf57854b3f0374c51c228ec5ce419\": container with ID starting with 51300befaf712199c18596de3922ead00d3bf57854b3f0374c51c228ec5ce419 not found: ID does not exist" containerID="51300befaf712199c18596de3922ead00d3bf57854b3f0374c51c228ec5ce419" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.504014 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51300befaf712199c18596de3922ead00d3bf57854b3f0374c51c228ec5ce419"} err="failed to get container status \"51300befaf712199c18596de3922ead00d3bf57854b3f0374c51c228ec5ce419\": rpc error: code = NotFound desc = could not find container \"51300befaf712199c18596de3922ead00d3bf57854b3f0374c51c228ec5ce419\": container with ID starting with 51300befaf712199c18596de3922ead00d3bf57854b3f0374c51c228ec5ce419 not found: ID does not exist" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.511445 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-55bf64c844-k7qkf"] Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.517911 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-55bf64c844-k7qkf"] Nov 28 
16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.778727 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-856f74b64f-54q7t"] Nov 28 16:15:50 crc kubenswrapper[4954]: E1128 16:15:50.779188 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="322cd57c-f334-48bd-ae4b-d949494360ee" containerName="controller-manager" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.779226 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="322cd57c-f334-48bd-ae4b-d949494360ee" containerName="controller-manager" Nov 28 16:15:50 crc kubenswrapper[4954]: E1128 16:15:50.779285 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3564534-68d8-4e2e-bfb5-fbe02af59b5e" containerName="route-controller-manager" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.779305 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3564534-68d8-4e2e-bfb5-fbe02af59b5e" containerName="route-controller-manager" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.779622 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3564534-68d8-4e2e-bfb5-fbe02af59b5e" containerName="route-controller-manager" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.779672 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="322cd57c-f334-48bd-ae4b-d949494360ee" containerName="controller-manager" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.780553 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.785190 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.785967 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.786169 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.786247 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.786712 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.786188 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.787321 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng"] Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.788636 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.793828 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.794266 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.794459 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.794695 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.794859 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.795092 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.797002 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-856f74b64f-54q7t"] Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.804031 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng"] Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.805735 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.839153 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-client-ca\") pod \"controller-manager-856f74b64f-54q7t\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.839222 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a30b1d70-2887-4910-9569-58aeec9f7a7d-serving-cert\") pod \"controller-manager-856f74b64f-54q7t\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.839251 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llbff\" (UniqueName: \"kubernetes.io/projected/a30b1d70-2887-4910-9569-58aeec9f7a7d-kube-api-access-llbff\") pod \"controller-manager-856f74b64f-54q7t\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.839308 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-config\") pod \"controller-manager-856f74b64f-54q7t\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " 
pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.839351 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b0b2a147-a8d9-4a15-b107-94490f55af50-serving-cert\") pod \"route-controller-manager-67cc8d88b-2rrng\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.839372 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0b2a147-a8d9-4a15-b107-94490f55af50-config\") pod \"route-controller-manager-67cc8d88b-2rrng\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.839421 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b0b2a147-a8d9-4a15-b107-94490f55af50-client-ca\") pod \"route-controller-manager-67cc8d88b-2rrng\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.839450 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-proxy-ca-bundles\") pod \"controller-manager-856f74b64f-54q7t\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.839477 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44ccg\" (UniqueName: \"kubernetes.io/projected/b0b2a147-a8d9-4a15-b107-94490f55af50-kube-api-access-44ccg\") pod \"route-controller-manager-67cc8d88b-2rrng\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.940368 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a30b1d70-2887-4910-9569-58aeec9f7a7d-serving-cert\") pod \"controller-manager-856f74b64f-54q7t\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.940450 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llbff\" (UniqueName: \"kubernetes.io/projected/a30b1d70-2887-4910-9569-58aeec9f7a7d-kube-api-access-llbff\") pod \"controller-manager-856f74b64f-54q7t\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.940505 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-config\") pod \"controller-manager-856f74b64f-54q7t\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " 
pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.940598 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b0b2a147-a8d9-4a15-b107-94490f55af50-serving-cert\") pod \"route-controller-manager-67cc8d88b-2rrng\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.940639 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0b2a147-a8d9-4a15-b107-94490f55af50-config\") pod \"route-controller-manager-67cc8d88b-2rrng\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.940717 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b0b2a147-a8d9-4a15-b107-94490f55af50-client-ca\") pod \"route-controller-manager-67cc8d88b-2rrng\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.940769 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-proxy-ca-bundles\") pod \"controller-manager-856f74b64f-54q7t\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.940812 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44ccg\" (UniqueName: \"kubernetes.io/projected/b0b2a147-a8d9-4a15-b107-94490f55af50-kube-api-access-44ccg\") pod \"route-controller-manager-67cc8d88b-2rrng\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.940859 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-client-ca\") pod \"controller-manager-856f74b64f-54q7t\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.942763 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b0b2a147-a8d9-4a15-b107-94490f55af50-client-ca\") pod \"route-controller-manager-67cc8d88b-2rrng\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.949630 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-client-ca\") pod \"controller-manager-856f74b64f-54q7t\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.950558 4954 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0b2a147-a8d9-4a15-b107-94490f55af50-config\") pod \"route-controller-manager-67cc8d88b-2rrng\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.953003 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-proxy-ca-bundles\") pod \"controller-manager-856f74b64f-54q7t\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.954415 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a30b1d70-2887-4910-9569-58aeec9f7a7d-serving-cert\") pod \"controller-manager-856f74b64f-54q7t\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.964579 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44ccg\" (UniqueName: \"kubernetes.io/projected/b0b2a147-a8d9-4a15-b107-94490f55af50-kube-api-access-44ccg\") pod \"route-controller-manager-67cc8d88b-2rrng\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.964990 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-config\") pod \"controller-manager-856f74b64f-54q7t\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.969438 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b0b2a147-a8d9-4a15-b107-94490f55af50-serving-cert\") pod \"route-controller-manager-67cc8d88b-2rrng\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") " pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:50 crc kubenswrapper[4954]: I1128 16:15:50.987512 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llbff\" (UniqueName: \"kubernetes.io/projected/a30b1d70-2887-4910-9569-58aeec9f7a7d-kube-api-access-llbff\") pod \"controller-manager-856f74b64f-54q7t\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:51 crc kubenswrapper[4954]: I1128 16:15:51.123155 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:51 crc kubenswrapper[4954]: I1128 16:15:51.133923 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:51 crc kubenswrapper[4954]: I1128 16:15:51.424166 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-856f74b64f-54q7t"] Nov 28 16:15:51 crc kubenswrapper[4954]: I1128 16:15:51.460888 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" event={"ID":"a30b1d70-2887-4910-9569-58aeec9f7a7d","Type":"ContainerStarted","Data":"5cf172c3eacde9dfdfea28a424f827db11380f359e38534c1c69276a98a0633c"} Nov 28 16:15:51 crc kubenswrapper[4954]: I1128 16:15:51.475712 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng"] Nov 28 16:15:51 crc kubenswrapper[4954]: I1128 16:15:51.863234 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="322cd57c-f334-48bd-ae4b-d949494360ee" path="/var/lib/kubelet/pods/322cd57c-f334-48bd-ae4b-d949494360ee/volumes" Nov 28 16:15:51 crc kubenswrapper[4954]: I1128 16:15:51.864137 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3564534-68d8-4e2e-bfb5-fbe02af59b5e" path="/var/lib/kubelet/pods/e3564534-68d8-4e2e-bfb5-fbe02af59b5e/volumes" Nov 28 16:15:52 crc kubenswrapper[4954]: I1128 16:15:52.473178 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" event={"ID":"b0b2a147-a8d9-4a15-b107-94490f55af50","Type":"ContainerStarted","Data":"ba1a1a27e0da1eb63ea8f89c85f540640e5793d411cab7ba2a45e988b48220a0"} Nov 28 16:15:52 crc kubenswrapper[4954]: I1128 16:15:52.473258 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" event={"ID":"b0b2a147-a8d9-4a15-b107-94490f55af50","Type":"ContainerStarted","Data":"375dd772733f063a7dbbf5854e2496a17adfbd1a9927f3c41eb96faba1e92483"} Nov 28 16:15:52 crc kubenswrapper[4954]: I1128 16:15:52.473401 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:52 crc kubenswrapper[4954]: I1128 16:15:52.475511 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" event={"ID":"a30b1d70-2887-4910-9569-58aeec9f7a7d","Type":"ContainerStarted","Data":"446b05dcc1e12528d11964d59744f9c95ada720880675589699f4f0f4006452e"} Nov 28 16:15:52 crc kubenswrapper[4954]: I1128 16:15:52.476249 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:52 crc kubenswrapper[4954]: I1128 16:15:52.481398 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" Nov 28 16:15:52 crc kubenswrapper[4954]: I1128 16:15:52.483058 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:15:52 crc kubenswrapper[4954]: I1128 16:15:52.493018 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" podStartSLOduration=3.492990795 podStartE2EDuration="3.492990795s" podCreationTimestamp="2025-11-28 16:15:49 
+0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:15:52.491789328 +0000 UTC m=+305.883457879" watchObservedRunningTime="2025-11-28 16:15:52.492990795 +0000 UTC m=+305.884659366" Nov 28 16:15:52 crc kubenswrapper[4954]: I1128 16:15:52.532842 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" podStartSLOduration=3.53281549 podStartE2EDuration="3.53281549s" podCreationTimestamp="2025-11-28 16:15:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:15:52.525854414 +0000 UTC m=+305.917523005" watchObservedRunningTime="2025-11-28 16:15:52.53281549 +0000 UTC m=+305.924484071" Nov 28 16:16:03 crc kubenswrapper[4954]: I1128 16:16:03.960457 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-856f74b64f-54q7t"] Nov 28 16:16:03 crc kubenswrapper[4954]: I1128 16:16:03.961496 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" podUID="a30b1d70-2887-4910-9569-58aeec9f7a7d" containerName="controller-manager" containerID="cri-o://446b05dcc1e12528d11964d59744f9c95ada720880675589699f4f0f4006452e" gracePeriod=30 Nov 28 16:16:04 crc kubenswrapper[4954]: I1128 16:16:04.569176 4954 generic.go:334] "Generic (PLEG): container finished" podID="a30b1d70-2887-4910-9569-58aeec9f7a7d" containerID="446b05dcc1e12528d11964d59744f9c95ada720880675589699f4f0f4006452e" exitCode=0 Nov 28 16:16:04 crc kubenswrapper[4954]: I1128 16:16:04.569711 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" event={"ID":"a30b1d70-2887-4910-9569-58aeec9f7a7d","Type":"ContainerDied","Data":"446b05dcc1e12528d11964d59744f9c95ada720880675589699f4f0f4006452e"} Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.093768 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.137453 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-55bf64c844-lnn5m"] Nov 28 16:16:05 crc kubenswrapper[4954]: E1128 16:16:05.138116 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a30b1d70-2887-4910-9569-58aeec9f7a7d" containerName="controller-manager" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.138144 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="a30b1d70-2887-4910-9569-58aeec9f7a7d" containerName="controller-manager" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.138346 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="a30b1d70-2887-4910-9569-58aeec9f7a7d" containerName="controller-manager" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.138950 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.142332 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llbff\" (UniqueName: \"kubernetes.io/projected/a30b1d70-2887-4910-9569-58aeec9f7a7d-kube-api-access-llbff\") pod \"a30b1d70-2887-4910-9569-58aeec9f7a7d\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.142436 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a30b1d70-2887-4910-9569-58aeec9f7a7d-serving-cert\") pod \"a30b1d70-2887-4910-9569-58aeec9f7a7d\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.142684 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-config\") pod \"a30b1d70-2887-4910-9569-58aeec9f7a7d\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.142781 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-proxy-ca-bundles\") pod \"a30b1d70-2887-4910-9569-58aeec9f7a7d\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.142830 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-client-ca\") pod \"a30b1d70-2887-4910-9569-58aeec9f7a7d\" (UID: \"a30b1d70-2887-4910-9569-58aeec9f7a7d\") " Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.143912 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-client-ca" (OuterVolumeSpecName: "client-ca") pod "a30b1d70-2887-4910-9569-58aeec9f7a7d" (UID: "a30b1d70-2887-4910-9569-58aeec9f7a7d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.143963 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "a30b1d70-2887-4910-9569-58aeec9f7a7d" (UID: "a30b1d70-2887-4910-9569-58aeec9f7a7d"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.144195 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-config" (OuterVolumeSpecName: "config") pod "a30b1d70-2887-4910-9569-58aeec9f7a7d" (UID: "a30b1d70-2887-4910-9569-58aeec9f7a7d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.150996 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a30b1d70-2887-4910-9569-58aeec9f7a7d-kube-api-access-llbff" (OuterVolumeSpecName: "kube-api-access-llbff") pod "a30b1d70-2887-4910-9569-58aeec9f7a7d" (UID: "a30b1d70-2887-4910-9569-58aeec9f7a7d"). InnerVolumeSpecName "kube-api-access-llbff". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.151809 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-55bf64c844-lnn5m"] Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.158667 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a30b1d70-2887-4910-9569-58aeec9f7a7d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a30b1d70-2887-4910-9569-58aeec9f7a7d" (UID: "a30b1d70-2887-4910-9569-58aeec9f7a7d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.244965 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xw6mw\" (UniqueName: \"kubernetes.io/projected/94d97980-c874-470b-aa12-4b5f0ac60881-kube-api-access-xw6mw\") pod \"controller-manager-55bf64c844-lnn5m\" (UID: \"94d97980-c874-470b-aa12-4b5f0ac60881\") " pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.245139 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/94d97980-c874-470b-aa12-4b5f0ac60881-client-ca\") pod \"controller-manager-55bf64c844-lnn5m\" (UID: \"94d97980-c874-470b-aa12-4b5f0ac60881\") " pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.245192 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/94d97980-c874-470b-aa12-4b5f0ac60881-proxy-ca-bundles\") pod \"controller-manager-55bf64c844-lnn5m\" (UID: \"94d97980-c874-470b-aa12-4b5f0ac60881\") " pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.245245 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94d97980-c874-470b-aa12-4b5f0ac60881-config\") pod \"controller-manager-55bf64c844-lnn5m\" (UID: \"94d97980-c874-470b-aa12-4b5f0ac60881\") " pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.245442 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/94d97980-c874-470b-aa12-4b5f0ac60881-serving-cert\") pod \"controller-manager-55bf64c844-lnn5m\" (UID: \"94d97980-c874-470b-aa12-4b5f0ac60881\") " pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.245577 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.245602 4954 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.245614 4954 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/a30b1d70-2887-4910-9569-58aeec9f7a7d-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.245628 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llbff\" (UniqueName: \"kubernetes.io/projected/a30b1d70-2887-4910-9569-58aeec9f7a7d-kube-api-access-llbff\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.245640 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a30b1d70-2887-4910-9569-58aeec9f7a7d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.346892 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xw6mw\" (UniqueName: \"kubernetes.io/projected/94d97980-c874-470b-aa12-4b5f0ac60881-kube-api-access-xw6mw\") pod \"controller-manager-55bf64c844-lnn5m\" (UID: \"94d97980-c874-470b-aa12-4b5f0ac60881\") " pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.346995 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/94d97980-c874-470b-aa12-4b5f0ac60881-client-ca\") pod \"controller-manager-55bf64c844-lnn5m\" (UID: \"94d97980-c874-470b-aa12-4b5f0ac60881\") " pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.347035 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/94d97980-c874-470b-aa12-4b5f0ac60881-proxy-ca-bundles\") pod \"controller-manager-55bf64c844-lnn5m\" (UID: \"94d97980-c874-470b-aa12-4b5f0ac60881\") " pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.347068 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94d97980-c874-470b-aa12-4b5f0ac60881-config\") pod \"controller-manager-55bf64c844-lnn5m\" (UID: \"94d97980-c874-470b-aa12-4b5f0ac60881\") " pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.347143 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/94d97980-c874-470b-aa12-4b5f0ac60881-serving-cert\") pod \"controller-manager-55bf64c844-lnn5m\" (UID: \"94d97980-c874-470b-aa12-4b5f0ac60881\") " pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.348938 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/94d97980-c874-470b-aa12-4b5f0ac60881-client-ca\") pod \"controller-manager-55bf64c844-lnn5m\" (UID: \"94d97980-c874-470b-aa12-4b5f0ac60881\") " pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.349636 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/94d97980-c874-470b-aa12-4b5f0ac60881-proxy-ca-bundles\") pod \"controller-manager-55bf64c844-lnn5m\" (UID: \"94d97980-c874-470b-aa12-4b5f0ac60881\") " 
pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.349799 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94d97980-c874-470b-aa12-4b5f0ac60881-config\") pod \"controller-manager-55bf64c844-lnn5m\" (UID: \"94d97980-c874-470b-aa12-4b5f0ac60881\") " pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.353305 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/94d97980-c874-470b-aa12-4b5f0ac60881-serving-cert\") pod \"controller-manager-55bf64c844-lnn5m\" (UID: \"94d97980-c874-470b-aa12-4b5f0ac60881\") " pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.375551 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xw6mw\" (UniqueName: \"kubernetes.io/projected/94d97980-c874-470b-aa12-4b5f0ac60881-kube-api-access-xw6mw\") pod \"controller-manager-55bf64c844-lnn5m\" (UID: \"94d97980-c874-470b-aa12-4b5f0ac60881\") " pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.496814 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.576661 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" event={"ID":"a30b1d70-2887-4910-9569-58aeec9f7a7d","Type":"ContainerDied","Data":"5cf172c3eacde9dfdfea28a424f827db11380f359e38534c1c69276a98a0633c"} Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.576756 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-856f74b64f-54q7t" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.576957 4954 scope.go:117] "RemoveContainer" containerID="446b05dcc1e12528d11964d59744f9c95ada720880675589699f4f0f4006452e" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.639399 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-856f74b64f-54q7t"] Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.644909 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-856f74b64f-54q7t"] Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.869107 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a30b1d70-2887-4910-9569-58aeec9f7a7d" path="/var/lib/kubelet/pods/a30b1d70-2887-4910-9569-58aeec9f7a7d/volumes" Nov 28 16:16:05 crc kubenswrapper[4954]: I1128 16:16:05.979496 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-55bf64c844-lnn5m"] Nov 28 16:16:05 crc kubenswrapper[4954]: W1128 16:16:05.985857 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod94d97980_c874_470b_aa12_4b5f0ac60881.slice/crio-cc6918194d63d11a3d6a05276e256657919897209bd9dcca8cf5ec16986beff6 WatchSource:0}: Error finding container cc6918194d63d11a3d6a05276e256657919897209bd9dcca8cf5ec16986beff6: Status 404 returned error can't find the container with id cc6918194d63d11a3d6a05276e256657919897209bd9dcca8cf5ec16986beff6 Nov 28 16:16:06 crc kubenswrapper[4954]: I1128 16:16:06.583201 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" event={"ID":"94d97980-c874-470b-aa12-4b5f0ac60881","Type":"ContainerStarted","Data":"b5740aa2f2619df84c291a84ba58013ac6e491d2e988c8e63ab0344ace95acf3"} Nov 28 16:16:06 crc kubenswrapper[4954]: I1128 16:16:06.583269 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" event={"ID":"94d97980-c874-470b-aa12-4b5f0ac60881","Type":"ContainerStarted","Data":"cc6918194d63d11a3d6a05276e256657919897209bd9dcca8cf5ec16986beff6"} Nov 28 16:16:06 crc kubenswrapper[4954]: I1128 16:16:06.584137 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:06 crc kubenswrapper[4954]: I1128 16:16:06.593288 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" Nov 28 16:16:06 crc kubenswrapper[4954]: I1128 16:16:06.605567 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-55bf64c844-lnn5m" podStartSLOduration=3.605552758 podStartE2EDuration="3.605552758s" podCreationTimestamp="2025-11-28 16:16:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:16:06.603642609 +0000 UTC m=+319.995311150" watchObservedRunningTime="2025-11-28 16:16:06.605552758 +0000 UTC m=+319.997221299" Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.428609 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kl4j2"] Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 
16:16:19.430214 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.448787 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kl4j2"]
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.497958 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cc08762a-2c7e-4c39-932a-57e32f69640c-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.498021 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cc08762a-2c7e-4c39-932a-57e32f69640c-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.498122 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cc08762a-2c7e-4c39-932a-57e32f69640c-bound-sa-token\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.498221 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9p5f\" (UniqueName: \"kubernetes.io/projected/cc08762a-2c7e-4c39-932a-57e32f69640c-kube-api-access-h9p5f\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.498304 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cc08762a-2c7e-4c39-932a-57e32f69640c-trusted-ca\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.498397 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cc08762a-2c7e-4c39-932a-57e32f69640c-registry-tls\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.498578 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cc08762a-2c7e-4c39-932a-57e32f69640c-registry-certificates\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.498648 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.519293 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.600019 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cc08762a-2c7e-4c39-932a-57e32f69640c-trusted-ca\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.600107 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cc08762a-2c7e-4c39-932a-57e32f69640c-registry-tls\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.600188 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cc08762a-2c7e-4c39-932a-57e32f69640c-registry-certificates\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.600256 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cc08762a-2c7e-4c39-932a-57e32f69640c-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.600301 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cc08762a-2c7e-4c39-932a-57e32f69640c-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.600332 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cc08762a-2c7e-4c39-932a-57e32f69640c-bound-sa-token\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.600361 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9p5f\" (UniqueName: \"kubernetes.io/projected/cc08762a-2c7e-4c39-932a-57e32f69640c-kube-api-access-h9p5f\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.601822 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cc08762a-2c7e-4c39-932a-57e32f69640c-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.602603 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cc08762a-2c7e-4c39-932a-57e32f69640c-trusted-ca\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.604420 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cc08762a-2c7e-4c39-932a-57e32f69640c-registry-certificates\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.611557 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cc08762a-2c7e-4c39-932a-57e32f69640c-registry-tls\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.612719 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cc08762a-2c7e-4c39-932a-57e32f69640c-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.621606 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cc08762a-2c7e-4c39-932a-57e32f69640c-bound-sa-token\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.630189 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9p5f\" (UniqueName: \"kubernetes.io/projected/cc08762a-2c7e-4c39-932a-57e32f69640c-kube-api-access-h9p5f\") pod \"image-registry-66df7c8f76-kl4j2\" (UID: \"cc08762a-2c7e-4c39-932a-57e32f69640c\") " pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:19 crc kubenswrapper[4954]: I1128 16:16:19.749410 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:20 crc kubenswrapper[4954]: I1128 16:16:20.207344 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kl4j2"]
Nov 28 16:16:20 crc kubenswrapper[4954]: W1128 16:16:20.215044 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcc08762a_2c7e_4c39_932a_57e32f69640c.slice/crio-a71d445c34a7491dcf5dad7c49c345cc6eb586925eca61fc2676820df6b19b26 WatchSource:0}: Error finding container a71d445c34a7491dcf5dad7c49c345cc6eb586925eca61fc2676820df6b19b26: Status 404 returned error can't find the container with id a71d445c34a7491dcf5dad7c49c345cc6eb586925eca61fc2676820df6b19b26
Nov 28 16:16:20 crc kubenswrapper[4954]: I1128 16:16:20.681141 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2" event={"ID":"cc08762a-2c7e-4c39-932a-57e32f69640c","Type":"ContainerStarted","Data":"a71d445c34a7491dcf5dad7c49c345cc6eb586925eca61fc2676820df6b19b26"}
Nov 28 16:16:21 crc kubenswrapper[4954]: I1128 16:16:21.695228 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2" event={"ID":"cc08762a-2c7e-4c39-932a-57e32f69640c","Type":"ContainerStarted","Data":"c14960b138992d672407948363dcd4cc17ea85670bd63b81564187787057f53b"}
Nov 28 16:16:21 crc kubenswrapper[4954]: I1128 16:16:21.695610 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2"
Nov 28 16:16:23 crc kubenswrapper[4954]: I1128 16:16:23.936849 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2" podStartSLOduration=4.936816198 podStartE2EDuration="4.936816198s" podCreationTimestamp="2025-11-28 16:16:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:16:21.724101131 +0000 UTC m=+335.115769722" watchObservedRunningTime="2025-11-28 16:16:23.936816198 +0000 UTC m=+337.328484779"
Nov 28 16:16:23 crc kubenswrapper[4954]: I1128 16:16:23.941319 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng"]
Nov 28 16:16:23 crc kubenswrapper[4954]: I1128 16:16:23.941784 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" podUID="b0b2a147-a8d9-4a15-b107-94490f55af50" containerName="route-controller-manager" containerID="cri-o://ba1a1a27e0da1eb63ea8f89c85f540640e5793d411cab7ba2a45e988b48220a0" gracePeriod=30
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.396488 4954 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng"
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.470717 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b0b2a147-a8d9-4a15-b107-94490f55af50-client-ca\") pod \"b0b2a147-a8d9-4a15-b107-94490f55af50\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") "
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.470821 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0b2a147-a8d9-4a15-b107-94490f55af50-config\") pod \"b0b2a147-a8d9-4a15-b107-94490f55af50\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") "
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.470913 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b0b2a147-a8d9-4a15-b107-94490f55af50-serving-cert\") pod \"b0b2a147-a8d9-4a15-b107-94490f55af50\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") "
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.470955 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44ccg\" (UniqueName: \"kubernetes.io/projected/b0b2a147-a8d9-4a15-b107-94490f55af50-kube-api-access-44ccg\") pod \"b0b2a147-a8d9-4a15-b107-94490f55af50\" (UID: \"b0b2a147-a8d9-4a15-b107-94490f55af50\") "
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.471893 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0b2a147-a8d9-4a15-b107-94490f55af50-client-ca" (OuterVolumeSpecName: "client-ca") pod "b0b2a147-a8d9-4a15-b107-94490f55af50" (UID: "b0b2a147-a8d9-4a15-b107-94490f55af50"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.472040 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0b2a147-a8d9-4a15-b107-94490f55af50-config" (OuterVolumeSpecName: "config") pod "b0b2a147-a8d9-4a15-b107-94490f55af50" (UID: "b0b2a147-a8d9-4a15-b107-94490f55af50"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.485562 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0b2a147-a8d9-4a15-b107-94490f55af50-kube-api-access-44ccg" (OuterVolumeSpecName: "kube-api-access-44ccg") pod "b0b2a147-a8d9-4a15-b107-94490f55af50" (UID: "b0b2a147-a8d9-4a15-b107-94490f55af50"). InnerVolumeSpecName "kube-api-access-44ccg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.497252 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0b2a147-a8d9-4a15-b107-94490f55af50-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b0b2a147-a8d9-4a15-b107-94490f55af50" (UID: "b0b2a147-a8d9-4a15-b107-94490f55af50"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.572591 4954 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b0b2a147-a8d9-4a15-b107-94490f55af50-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.572683 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44ccg\" (UniqueName: \"kubernetes.io/projected/b0b2a147-a8d9-4a15-b107-94490f55af50-kube-api-access-44ccg\") on node \"crc\" DevicePath \"\""
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.572700 4954 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b0b2a147-a8d9-4a15-b107-94490f55af50-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.572712 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0b2a147-a8d9-4a15-b107-94490f55af50-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.719091 4954 generic.go:334] "Generic (PLEG): container finished" podID="b0b2a147-a8d9-4a15-b107-94490f55af50" containerID="ba1a1a27e0da1eb63ea8f89c85f540640e5793d411cab7ba2a45e988b48220a0" exitCode=0
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.719150 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" event={"ID":"b0b2a147-a8d9-4a15-b107-94490f55af50","Type":"ContainerDied","Data":"ba1a1a27e0da1eb63ea8f89c85f540640e5793d411cab7ba2a45e988b48220a0"}
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.719186 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng"
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.719216 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng" event={"ID":"b0b2a147-a8d9-4a15-b107-94490f55af50","Type":"ContainerDied","Data":"375dd772733f063a7dbbf5854e2496a17adfbd1a9927f3c41eb96faba1e92483"}
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.719245 4954 scope.go:117] "RemoveContainer" containerID="ba1a1a27e0da1eb63ea8f89c85f540640e5793d411cab7ba2a45e988b48220a0"
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.741811 4954 scope.go:117] "RemoveContainer" containerID="ba1a1a27e0da1eb63ea8f89c85f540640e5793d411cab7ba2a45e988b48220a0"
Nov 28 16:16:24 crc kubenswrapper[4954]: E1128 16:16:24.742341 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba1a1a27e0da1eb63ea8f89c85f540640e5793d411cab7ba2a45e988b48220a0\": container with ID starting with ba1a1a27e0da1eb63ea8f89c85f540640e5793d411cab7ba2a45e988b48220a0 not found: ID does not exist" containerID="ba1a1a27e0da1eb63ea8f89c85f540640e5793d411cab7ba2a45e988b48220a0"
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.742381 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba1a1a27e0da1eb63ea8f89c85f540640e5793d411cab7ba2a45e988b48220a0"} err="failed to get container status \"ba1a1a27e0da1eb63ea8f89c85f540640e5793d411cab7ba2a45e988b48220a0\": rpc error: code = NotFound desc = could not find container \"ba1a1a27e0da1eb63ea8f89c85f540640e5793d411cab7ba2a45e988b48220a0\": container with ID starting with ba1a1a27e0da1eb63ea8f89c85f540640e5793d411cab7ba2a45e988b48220a0 not found: ID does not exist"
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.760422 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng"]
Nov 28 16:16:24 crc kubenswrapper[4954]: I1128 16:16:24.765348 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67cc8d88b-2rrng"]
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.792118 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"]
Nov 28 16:16:25 crc kubenswrapper[4954]: E1128 16:16:25.792974 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0b2a147-a8d9-4a15-b107-94490f55af50" containerName="route-controller-manager"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.793002 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0b2a147-a8d9-4a15-b107-94490f55af50" containerName="route-controller-manager"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.793172 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0b2a147-a8d9-4a15-b107-94490f55af50" containerName="route-controller-manager"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.794063 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.798090 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.798476 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.798661 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.798956 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.801503 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.803883 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.805998 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"]
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.864171 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0b2a147-a8d9-4a15-b107-94490f55af50" path="/var/lib/kubelet/pods/b0b2a147-a8d9-4a15-b107-94490f55af50/volumes"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.893728 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29fdr\" (UniqueName: \"kubernetes.io/projected/dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa-kube-api-access-29fdr\") pod \"route-controller-manager-56b6ff4c67-8fkr4\" (UID: \"dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.893795 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa-config\") pod \"route-controller-manager-56b6ff4c67-8fkr4\" (UID: \"dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.893842 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa-client-ca\") pod \"route-controller-manager-56b6ff4c67-8fkr4\" (UID: \"dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.894074 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa-serving-cert\") pod \"route-controller-manager-56b6ff4c67-8fkr4\" (UID: \"dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:25 crc
kubenswrapper[4954]: I1128 16:16:25.996802 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa-serving-cert\") pod \"route-controller-manager-56b6ff4c67-8fkr4\" (UID: \"dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.996882 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29fdr\" (UniqueName: \"kubernetes.io/projected/dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa-kube-api-access-29fdr\") pod \"route-controller-manager-56b6ff4c67-8fkr4\" (UID: \"dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.996919 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa-config\") pod \"route-controller-manager-56b6ff4c67-8fkr4\" (UID: \"dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.996967 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa-client-ca\") pod \"route-controller-manager-56b6ff4c67-8fkr4\" (UID: \"dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:25 crc kubenswrapper[4954]: I1128 16:16:25.998644 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa-client-ca\") pod \"route-controller-manager-56b6ff4c67-8fkr4\" (UID: \"dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:26 crc kubenswrapper[4954]: I1128 16:16:26.000611 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa-config\") pod \"route-controller-manager-56b6ff4c67-8fkr4\" (UID: \"dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:26 crc kubenswrapper[4954]: I1128 16:16:26.004485 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa-serving-cert\") pod \"route-controller-manager-56b6ff4c67-8fkr4\" (UID: \"dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:26 crc kubenswrapper[4954]: I1128 16:16:26.017369 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29fdr\" (UniqueName: \"kubernetes.io/projected/dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa-kube-api-access-29fdr\") pod \"route-controller-manager-56b6ff4c67-8fkr4\" (UID: \"dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa\") " pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:26 crc kubenswrapper[4954]: I1128 16:16:26.130513 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:26 crc kubenswrapper[4954]: I1128 16:16:26.669714 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"]
Nov 28 16:16:26 crc kubenswrapper[4954]: I1128 16:16:26.737384 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4" event={"ID":"dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa","Type":"ContainerStarted","Data":"75f64de4e071b1f6259ae19a70d7588ab63975909efa1db3ded19d4966eeef12"}
Nov 28 16:16:27 crc kubenswrapper[4954]: I1128 16:16:27.744755 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4" event={"ID":"dfcd6aca-8563-4b8f-ae1a-1d6d67733ffa","Type":"ContainerStarted","Data":"5240d01c614ca7be80415a37b17fce485d4886eedc541d32389b7ed79581d39a"}
Nov 28 16:16:27 crc kubenswrapper[4954]: I1128 16:16:27.746163 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:27 crc kubenswrapper[4954]: I1128 16:16:27.755277 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4"
Nov 28 16:16:27 crc kubenswrapper[4954]: I1128 16:16:27.773014 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-56b6ff4c67-8fkr4" podStartSLOduration=4.772982102 podStartE2EDuration="4.772982102s" podCreationTimestamp="2025-11-28 16:16:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:16:27.764578312 +0000 UTC m=+341.156246853" watchObservedRunningTime="2025-11-28 16:16:27.772982102 +0000 UTC m=+341.164650673"
Nov 28 16:16:29 crc kubenswrapper[4954]: I1128 16:16:29.913085 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tw7lr"]
Nov 28 16:16:29 crc kubenswrapper[4954]: I1128 16:16:29.913792 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tw7lr" podUID="0563a539-f9ae-4640-a7f2-68027936fe45" containerName="registry-server" containerID="cri-o://2f17e7476426a09fd2e70814d4939417f16b93cd28187737a1104367b78e0da2" gracePeriod=30
Nov 28 16:16:29 crc kubenswrapper[4954]: I1128 16:16:29.925154 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f5sl2"]
Nov 28 16:16:29 crc kubenswrapper[4954]: I1128 16:16:29.925449 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-f5sl2" podUID="500e43fe-7466-45bd-ab6c-9f357da02385" containerName="registry-server" containerID="cri-o://cf5d28b0fb429b471046c78c76a6091f87a20188849b00ffd15517c8ba9c55cd" gracePeriod=30
Nov 28 16:16:29 crc kubenswrapper[4954]: I1128 16:16:29.939495 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fvflb"]
Nov 28 16:16:29 crc kubenswrapper[4954]: I1128 16:16:29.939698 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" podUID="9e51474f-c520-42db-ad78-42f143642a7e" containerName="marketplace-operator" containerID="cri-o://563952950c0ef2198124863da9eaa76245ba156772af525f32bf5e0e96f2a1ad" gracePeriod=30
Nov 28 16:16:29 crc kubenswrapper[4954]: I1128 16:16:29.954241 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8zxgp"]
Nov 28 16:16:29 crc kubenswrapper[4954]: I1128 16:16:29.954294 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xrnc2"]
Nov 28 16:16:29 crc kubenswrapper[4954]: I1128 16:16:29.954467 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xrnc2" podUID="0a39712c-ebc2-4a9a-8205-537d4d00fae7" containerName="registry-server" containerID="cri-o://3b6d210faf54d5239f3f26fc08014bab3beed9293e9b24197ce626db700c9c5c" gracePeriod=30
Nov 28 16:16:29 crc kubenswrapper[4954]: I1128 16:16:29.954689 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8zxgp" podUID="4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" containerName="registry-server" containerID="cri-o://705912eab99cb974234a13b7ba102b5dcede89f77455a8de889a249188a8d40c" gracePeriod=30
Nov 28 16:16:29 crc kubenswrapper[4954]: I1128 16:16:29.962878 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-77lzc"]
Nov 28 16:16:29 crc kubenswrapper[4954]: I1128 16:16:29.964014 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-77lzc"
Nov 28 16:16:29 crc kubenswrapper[4954]: I1128 16:16:29.974679 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-77lzc"]
Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.047884 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ea94ca81-4305-4775-bfc2-a7ce2a7f47b9-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-77lzc\" (UID: \"ea94ca81-4305-4775-bfc2-a7ce2a7f47b9\") " pod="openshift-marketplace/marketplace-operator-79b997595-77lzc"
Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.047936 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ea94ca81-4305-4775-bfc2-a7ce2a7f47b9-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-77lzc\" (UID: \"ea94ca81-4305-4775-bfc2-a7ce2a7f47b9\") " pod="openshift-marketplace/marketplace-operator-79b997595-77lzc"
Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.047959 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxg9b\" (UniqueName: \"kubernetes.io/projected/ea94ca81-4305-4775-bfc2-a7ce2a7f47b9-kube-api-access-jxg9b\") pod \"marketplace-operator-79b997595-77lzc\" (UID: \"ea94ca81-4305-4775-bfc2-a7ce2a7f47b9\") " pod="openshift-marketplace/marketplace-operator-79b997595-77lzc"
Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.149904 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ea94ca81-4305-4775-bfc2-a7ce2a7f47b9-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-77lzc\" (UID: \"ea94ca81-4305-4775-bfc2-a7ce2a7f47b9\") " pod="openshift-marketplace/marketplace-operator-79b997595-77lzc"
Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.149953 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ea94ca81-4305-4775-bfc2-a7ce2a7f47b9-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-77lzc\" (UID: \"ea94ca81-4305-4775-bfc2-a7ce2a7f47b9\") " pod="openshift-marketplace/marketplace-operator-79b997595-77lzc"
Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.149978 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxg9b\" (UniqueName: \"kubernetes.io/projected/ea94ca81-4305-4775-bfc2-a7ce2a7f47b9-kube-api-access-jxg9b\") pod \"marketplace-operator-79b997595-77lzc\" (UID: \"ea94ca81-4305-4775-bfc2-a7ce2a7f47b9\") " pod="openshift-marketplace/marketplace-operator-79b997595-77lzc"
Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.151937 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ea94ca81-4305-4775-bfc2-a7ce2a7f47b9-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-77lzc\" (UID: \"ea94ca81-4305-4775-bfc2-a7ce2a7f47b9\") " pod="openshift-marketplace/marketplace-operator-79b997595-77lzc"
Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.164235 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ea94ca81-4305-4775-bfc2-a7ce2a7f47b9-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-77lzc\" (UID: \"ea94ca81-4305-4775-bfc2-a7ce2a7f47b9\") " pod="openshift-marketplace/marketplace-operator-79b997595-77lzc"
Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.168309 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxg9b\" (UniqueName: \"kubernetes.io/projected/ea94ca81-4305-4775-bfc2-a7ce2a7f47b9-kube-api-access-jxg9b\") pod \"marketplace-operator-79b997595-77lzc\" (UID: \"ea94ca81-4305-4775-bfc2-a7ce2a7f47b9\") " pod="openshift-marketplace/marketplace-operator-79b997595-77lzc"
Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.287690 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-77lzc"
Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.447597 4954 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.555249 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9e51474f-c520-42db-ad78-42f143642a7e-marketplace-operator-metrics\") pod \"9e51474f-c520-42db-ad78-42f143642a7e\" (UID: \"9e51474f-c520-42db-ad78-42f143642a7e\") " Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.555303 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zs2gn\" (UniqueName: \"kubernetes.io/projected/9e51474f-c520-42db-ad78-42f143642a7e-kube-api-access-zs2gn\") pod \"9e51474f-c520-42db-ad78-42f143642a7e\" (UID: \"9e51474f-c520-42db-ad78-42f143642a7e\") " Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.555325 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9e51474f-c520-42db-ad78-42f143642a7e-marketplace-trusted-ca\") pod \"9e51474f-c520-42db-ad78-42f143642a7e\" (UID: \"9e51474f-c520-42db-ad78-42f143642a7e\") " Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.556294 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e51474f-c520-42db-ad78-42f143642a7e-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "9e51474f-c520-42db-ad78-42f143642a7e" (UID: "9e51474f-c520-42db-ad78-42f143642a7e"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.561188 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e51474f-c520-42db-ad78-42f143642a7e-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "9e51474f-c520-42db-ad78-42f143642a7e" (UID: "9e51474f-c520-42db-ad78-42f143642a7e"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.561383 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e51474f-c520-42db-ad78-42f143642a7e-kube-api-access-zs2gn" (OuterVolumeSpecName: "kube-api-access-zs2gn") pod "9e51474f-c520-42db-ad78-42f143642a7e" (UID: "9e51474f-c520-42db-ad78-42f143642a7e"). InnerVolumeSpecName "kube-api-access-zs2gn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.664191 4954 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9e51474f-c520-42db-ad78-42f143642a7e-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.664221 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zs2gn\" (UniqueName: \"kubernetes.io/projected/9e51474f-c520-42db-ad78-42f143642a7e-kube-api-access-zs2gn\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.664231 4954 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9e51474f-c520-42db-ad78-42f143642a7e-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.682220 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.707808 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f5sl2" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.712615 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.717892 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tw7lr" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.765050 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/500e43fe-7466-45bd-ab6c-9f357da02385-utilities\") pod \"500e43fe-7466-45bd-ab6c-9f357da02385\" (UID: \"500e43fe-7466-45bd-ab6c-9f357da02385\") " Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.765765 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a39712c-ebc2-4a9a-8205-537d4d00fae7-catalog-content\") pod \"0a39712c-ebc2-4a9a-8205-537d4d00fae7\" (UID: \"0a39712c-ebc2-4a9a-8205-537d4d00fae7\") " Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.765811 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0563a539-f9ae-4640-a7f2-68027936fe45-catalog-content\") pod \"0563a539-f9ae-4640-a7f2-68027936fe45\" (UID: \"0563a539-f9ae-4640-a7f2-68027936fe45\") " Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.765851 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0563a539-f9ae-4640-a7f2-68027936fe45-utilities\") pod \"0563a539-f9ae-4640-a7f2-68027936fe45\" (UID: \"0563a539-f9ae-4640-a7f2-68027936fe45\") " Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.765908 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a39712c-ebc2-4a9a-8205-537d4d00fae7-utilities\") pod \"0a39712c-ebc2-4a9a-8205-537d4d00fae7\" (UID: \"0a39712c-ebc2-4a9a-8205-537d4d00fae7\") " Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.765952 4954 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-utilities\") pod \"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b\" (UID: \"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b\") " Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.765804 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/500e43fe-7466-45bd-ab6c-9f357da02385-utilities" (OuterVolumeSpecName: "utilities") pod "500e43fe-7466-45bd-ab6c-9f357da02385" (UID: "500e43fe-7466-45bd-ab6c-9f357da02385"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.766819 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0563a539-f9ae-4640-a7f2-68027936fe45-utilities" (OuterVolumeSpecName: "utilities") pod "0563a539-f9ae-4640-a7f2-68027936fe45" (UID: "0563a539-f9ae-4640-a7f2-68027936fe45"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.767145 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-utilities" (OuterVolumeSpecName: "utilities") pod "4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" (UID: "4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.767755 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a39712c-ebc2-4a9a-8205-537d4d00fae7-utilities" (OuterVolumeSpecName: "utilities") pod "0a39712c-ebc2-4a9a-8205-537d4d00fae7" (UID: "0a39712c-ebc2-4a9a-8205-537d4d00fae7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.767855 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-catalog-content\") pod \"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b\" (UID: \"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b\") " Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.767884 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqgt2\" (UniqueName: \"kubernetes.io/projected/0a39712c-ebc2-4a9a-8205-537d4d00fae7-kube-api-access-fqgt2\") pod \"0a39712c-ebc2-4a9a-8205-537d4d00fae7\" (UID: \"0a39712c-ebc2-4a9a-8205-537d4d00fae7\") " Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.767919 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rz96t\" (UniqueName: \"kubernetes.io/projected/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-kube-api-access-rz96t\") pod \"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b\" (UID: \"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b\") " Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.767941 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtcrj\" (UniqueName: \"kubernetes.io/projected/0563a539-f9ae-4640-a7f2-68027936fe45-kube-api-access-gtcrj\") pod \"0563a539-f9ae-4640-a7f2-68027936fe45\" (UID: \"0563a539-f9ae-4640-a7f2-68027936fe45\") " Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.768918 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gdknf\" (UniqueName: \"kubernetes.io/projected/500e43fe-7466-45bd-ab6c-9f357da02385-kube-api-access-gdknf\") pod \"500e43fe-7466-45bd-ab6c-9f357da02385\" (UID: \"500e43fe-7466-45bd-ab6c-9f357da02385\") " Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.768954 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/500e43fe-7466-45bd-ab6c-9f357da02385-catalog-content\") pod \"500e43fe-7466-45bd-ab6c-9f357da02385\" (UID: \"500e43fe-7466-45bd-ab6c-9f357da02385\") " Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.769937 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/500e43fe-7466-45bd-ab6c-9f357da02385-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.770048 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0563a539-f9ae-4640-a7f2-68027936fe45-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.770171 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a39712c-ebc2-4a9a-8205-537d4d00fae7-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.770291 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.770172 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xrnc2" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.770030 4954 generic.go:334] "Generic (PLEG): container finished" podID="0a39712c-ebc2-4a9a-8205-537d4d00fae7" containerID="3b6d210faf54d5239f3f26fc08014bab3beed9293e9b24197ce626db700c9c5c" exitCode=0 Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.770068 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xrnc2" event={"ID":"0a39712c-ebc2-4a9a-8205-537d4d00fae7","Type":"ContainerDied","Data":"3b6d210faf54d5239f3f26fc08014bab3beed9293e9b24197ce626db700c9c5c"} Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.771128 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xrnc2" event={"ID":"0a39712c-ebc2-4a9a-8205-537d4d00fae7","Type":"ContainerDied","Data":"a44ef0acaffca54a08b8152d1d389fabeb17b106a2b5b7d88d8b4df627eb0ef8"} Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.771159 4954 scope.go:117] "RemoveContainer" containerID="3b6d210faf54d5239f3f26fc08014bab3beed9293e9b24197ce626db700c9c5c" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.771302 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-kube-api-access-rz96t" (OuterVolumeSpecName: "kube-api-access-rz96t") pod "4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" (UID: "4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b"). InnerVolumeSpecName "kube-api-access-rz96t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.772149 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/500e43fe-7466-45bd-ab6c-9f357da02385-kube-api-access-gdknf" (OuterVolumeSpecName: "kube-api-access-gdknf") pod "500e43fe-7466-45bd-ab6c-9f357da02385" (UID: "500e43fe-7466-45bd-ab6c-9f357da02385"). InnerVolumeSpecName "kube-api-access-gdknf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.772445 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0563a539-f9ae-4640-a7f2-68027936fe45-kube-api-access-gtcrj" (OuterVolumeSpecName: "kube-api-access-gtcrj") pod "0563a539-f9ae-4640-a7f2-68027936fe45" (UID: "0563a539-f9ae-4640-a7f2-68027936fe45"). InnerVolumeSpecName "kube-api-access-gtcrj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.773519 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a39712c-ebc2-4a9a-8205-537d4d00fae7-kube-api-access-fqgt2" (OuterVolumeSpecName: "kube-api-access-fqgt2") pod "0a39712c-ebc2-4a9a-8205-537d4d00fae7" (UID: "0a39712c-ebc2-4a9a-8205-537d4d00fae7"). InnerVolumeSpecName "kube-api-access-fqgt2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.776890 4954 generic.go:334] "Generic (PLEG): container finished" podID="4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" containerID="705912eab99cb974234a13b7ba102b5dcede89f77455a8de889a249188a8d40c" exitCode=0 Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.777182 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8zxgp" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.777743 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8zxgp" event={"ID":"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b","Type":"ContainerDied","Data":"705912eab99cb974234a13b7ba102b5dcede89f77455a8de889a249188a8d40c"} Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.777854 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8zxgp" event={"ID":"4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b","Type":"ContainerDied","Data":"69426fa0467cf83d07a844ef5a706048de18595c815680a8abe917baa34be7b8"} Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.782585 4954 generic.go:334] "Generic (PLEG): container finished" podID="9e51474f-c520-42db-ad78-42f143642a7e" containerID="563952950c0ef2198124863da9eaa76245ba156772af525f32bf5e0e96f2a1ad" exitCode=0 Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.782680 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" event={"ID":"9e51474f-c520-42db-ad78-42f143642a7e","Type":"ContainerDied","Data":"563952950c0ef2198124863da9eaa76245ba156772af525f32bf5e0e96f2a1ad"} Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.782713 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" event={"ID":"9e51474f-c520-42db-ad78-42f143642a7e","Type":"ContainerDied","Data":"8f1b0c8737f9df20c129657d99f33de4b4c3b1cf230e64d15be5a44102cf74c3"} Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.782871 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fvflb" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.796384 4954 scope.go:117] "RemoveContainer" containerID="33ae1bcd43d2df59007d1d3e517790e1acff2f2847b5f9ff21f61ac6220ac915" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.797708 4954 generic.go:334] "Generic (PLEG): container finished" podID="0563a539-f9ae-4640-a7f2-68027936fe45" containerID="2f17e7476426a09fd2e70814d4939417f16b93cd28187737a1104367b78e0da2" exitCode=0 Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.797775 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tw7lr" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.797835 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tw7lr" event={"ID":"0563a539-f9ae-4640-a7f2-68027936fe45","Type":"ContainerDied","Data":"2f17e7476426a09fd2e70814d4939417f16b93cd28187737a1104367b78e0da2"} Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.797976 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tw7lr" event={"ID":"0563a539-f9ae-4640-a7f2-68027936fe45","Type":"ContainerDied","Data":"957c1030f4ee4f3a287d457e151d361c42717954991d3f8ffb9f32e52eb823b9"} Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.801069 4954 generic.go:334] "Generic (PLEG): container finished" podID="500e43fe-7466-45bd-ab6c-9f357da02385" containerID="cf5d28b0fb429b471046c78c76a6091f87a20188849b00ffd15517c8ba9c55cd" exitCode=0 Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.801093 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f5sl2" event={"ID":"500e43fe-7466-45bd-ab6c-9f357da02385","Type":"ContainerDied","Data":"cf5d28b0fb429b471046c78c76a6091f87a20188849b00ffd15517c8ba9c55cd"} Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.801107 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f5sl2" event={"ID":"500e43fe-7466-45bd-ab6c-9f357da02385","Type":"ContainerDied","Data":"31163536d74c561b06ea4a2835429d892e3d211519ab6efd8dda5b1c2dbb137a"} Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.801176 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f5sl2" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.813370 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" (UID: "4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.813497 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fvflb"] Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.817645 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fvflb"] Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.821457 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0563a539-f9ae-4640-a7f2-68027936fe45-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0563a539-f9ae-4640-a7f2-68027936fe45" (UID: "0563a539-f9ae-4640-a7f2-68027936fe45"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.822837 4954 scope.go:117] "RemoveContainer" containerID="f33f19aefbce290573a2fffd553b55b21eed7852b1acb2a408bc2d5920dde18d" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.834474 4954 scope.go:117] "RemoveContainer" containerID="3b6d210faf54d5239f3f26fc08014bab3beed9293e9b24197ce626db700c9c5c" Nov 28 16:16:30 crc kubenswrapper[4954]: E1128 16:16:30.835034 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b6d210faf54d5239f3f26fc08014bab3beed9293e9b24197ce626db700c9c5c\": container with ID starting with 3b6d210faf54d5239f3f26fc08014bab3beed9293e9b24197ce626db700c9c5c not found: ID does not exist" containerID="3b6d210faf54d5239f3f26fc08014bab3beed9293e9b24197ce626db700c9c5c" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.835086 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b6d210faf54d5239f3f26fc08014bab3beed9293e9b24197ce626db700c9c5c"} err="failed to get container status \"3b6d210faf54d5239f3f26fc08014bab3beed9293e9b24197ce626db700c9c5c\": rpc error: code = NotFound desc = could not find container \"3b6d210faf54d5239f3f26fc08014bab3beed9293e9b24197ce626db700c9c5c\": container with ID starting with 3b6d210faf54d5239f3f26fc08014bab3beed9293e9b24197ce626db700c9c5c not found: ID does not exist" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.835118 4954 scope.go:117] "RemoveContainer" containerID="33ae1bcd43d2df59007d1d3e517790e1acff2f2847b5f9ff21f61ac6220ac915" Nov 28 16:16:30 crc kubenswrapper[4954]: E1128 16:16:30.835392 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33ae1bcd43d2df59007d1d3e517790e1acff2f2847b5f9ff21f61ac6220ac915\": container with ID starting with 33ae1bcd43d2df59007d1d3e517790e1acff2f2847b5f9ff21f61ac6220ac915 not found: ID does not exist" containerID="33ae1bcd43d2df59007d1d3e517790e1acff2f2847b5f9ff21f61ac6220ac915" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.835426 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33ae1bcd43d2df59007d1d3e517790e1acff2f2847b5f9ff21f61ac6220ac915"} err="failed to get container status \"33ae1bcd43d2df59007d1d3e517790e1acff2f2847b5f9ff21f61ac6220ac915\": rpc error: code = NotFound desc = could not find container \"33ae1bcd43d2df59007d1d3e517790e1acff2f2847b5f9ff21f61ac6220ac915\": container with ID starting with 33ae1bcd43d2df59007d1d3e517790e1acff2f2847b5f9ff21f61ac6220ac915 not found: ID does not exist" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.835448 4954 scope.go:117] "RemoveContainer" containerID="f33f19aefbce290573a2fffd553b55b21eed7852b1acb2a408bc2d5920dde18d" Nov 28 16:16:30 crc kubenswrapper[4954]: E1128 16:16:30.835724 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f33f19aefbce290573a2fffd553b55b21eed7852b1acb2a408bc2d5920dde18d\": container with ID starting with f33f19aefbce290573a2fffd553b55b21eed7852b1acb2a408bc2d5920dde18d not found: ID does not exist" containerID="f33f19aefbce290573a2fffd553b55b21eed7852b1acb2a408bc2d5920dde18d" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.835757 4954 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"f33f19aefbce290573a2fffd553b55b21eed7852b1acb2a408bc2d5920dde18d"} err="failed to get container status \"f33f19aefbce290573a2fffd553b55b21eed7852b1acb2a408bc2d5920dde18d\": rpc error: code = NotFound desc = could not find container \"f33f19aefbce290573a2fffd553b55b21eed7852b1acb2a408bc2d5920dde18d\": container with ID starting with f33f19aefbce290573a2fffd553b55b21eed7852b1acb2a408bc2d5920dde18d not found: ID does not exist" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.835775 4954 scope.go:117] "RemoveContainer" containerID="705912eab99cb974234a13b7ba102b5dcede89f77455a8de889a249188a8d40c" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.847870 4954 scope.go:117] "RemoveContainer" containerID="0cfb9600ae329cffc5ca9ea760d07066a9d68895135acf665310609695c9638a" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.858704 4954 scope.go:117] "RemoveContainer" containerID="bf87f6ba78fe1e9365a55580524b05bd5819d176b94f820ee9ac33562e9c2c53" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.869141 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/500e43fe-7466-45bd-ab6c-9f357da02385-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "500e43fe-7466-45bd-ab6c-9f357da02385" (UID: "500e43fe-7466-45bd-ab6c-9f357da02385"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.870715 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a39712c-ebc2-4a9a-8205-537d4d00fae7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0a39712c-ebc2-4a9a-8205-537d4d00fae7" (UID: "0a39712c-ebc2-4a9a-8205-537d4d00fae7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.871969 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.872316 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqgt2\" (UniqueName: \"kubernetes.io/projected/0a39712c-ebc2-4a9a-8205-537d4d00fae7-kube-api-access-fqgt2\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.872352 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rz96t\" (UniqueName: \"kubernetes.io/projected/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b-kube-api-access-rz96t\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.872371 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtcrj\" (UniqueName: \"kubernetes.io/projected/0563a539-f9ae-4640-a7f2-68027936fe45-kube-api-access-gtcrj\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.872388 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gdknf\" (UniqueName: \"kubernetes.io/projected/500e43fe-7466-45bd-ab6c-9f357da02385-kube-api-access-gdknf\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.872405 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/500e43fe-7466-45bd-ab6c-9f357da02385-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.872421 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a39712c-ebc2-4a9a-8205-537d4d00fae7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.872438 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0563a539-f9ae-4640-a7f2-68027936fe45-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.877475 4954 scope.go:117] "RemoveContainer" containerID="705912eab99cb974234a13b7ba102b5dcede89f77455a8de889a249188a8d40c" Nov 28 16:16:30 crc kubenswrapper[4954]: E1128 16:16:30.877950 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"705912eab99cb974234a13b7ba102b5dcede89f77455a8de889a249188a8d40c\": container with ID starting with 705912eab99cb974234a13b7ba102b5dcede89f77455a8de889a249188a8d40c not found: ID does not exist" containerID="705912eab99cb974234a13b7ba102b5dcede89f77455a8de889a249188a8d40c" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.877988 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"705912eab99cb974234a13b7ba102b5dcede89f77455a8de889a249188a8d40c"} err="failed to get container status \"705912eab99cb974234a13b7ba102b5dcede89f77455a8de889a249188a8d40c\": rpc error: code = NotFound desc = could not find container \"705912eab99cb974234a13b7ba102b5dcede89f77455a8de889a249188a8d40c\": container with ID starting with 705912eab99cb974234a13b7ba102b5dcede89f77455a8de889a249188a8d40c not found: ID does not exist" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 
16:16:30.878019 4954 scope.go:117] "RemoveContainer" containerID="0cfb9600ae329cffc5ca9ea760d07066a9d68895135acf665310609695c9638a" Nov 28 16:16:30 crc kubenswrapper[4954]: E1128 16:16:30.878344 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0cfb9600ae329cffc5ca9ea760d07066a9d68895135acf665310609695c9638a\": container with ID starting with 0cfb9600ae329cffc5ca9ea760d07066a9d68895135acf665310609695c9638a not found: ID does not exist" containerID="0cfb9600ae329cffc5ca9ea760d07066a9d68895135acf665310609695c9638a" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.878371 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0cfb9600ae329cffc5ca9ea760d07066a9d68895135acf665310609695c9638a"} err="failed to get container status \"0cfb9600ae329cffc5ca9ea760d07066a9d68895135acf665310609695c9638a\": rpc error: code = NotFound desc = could not find container \"0cfb9600ae329cffc5ca9ea760d07066a9d68895135acf665310609695c9638a\": container with ID starting with 0cfb9600ae329cffc5ca9ea760d07066a9d68895135acf665310609695c9638a not found: ID does not exist" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.878389 4954 scope.go:117] "RemoveContainer" containerID="bf87f6ba78fe1e9365a55580524b05bd5819d176b94f820ee9ac33562e9c2c53" Nov 28 16:16:30 crc kubenswrapper[4954]: E1128 16:16:30.878809 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf87f6ba78fe1e9365a55580524b05bd5819d176b94f820ee9ac33562e9c2c53\": container with ID starting with bf87f6ba78fe1e9365a55580524b05bd5819d176b94f820ee9ac33562e9c2c53 not found: ID does not exist" containerID="bf87f6ba78fe1e9365a55580524b05bd5819d176b94f820ee9ac33562e9c2c53" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.878861 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf87f6ba78fe1e9365a55580524b05bd5819d176b94f820ee9ac33562e9c2c53"} err="failed to get container status \"bf87f6ba78fe1e9365a55580524b05bd5819d176b94f820ee9ac33562e9c2c53\": rpc error: code = NotFound desc = could not find container \"bf87f6ba78fe1e9365a55580524b05bd5819d176b94f820ee9ac33562e9c2c53\": container with ID starting with bf87f6ba78fe1e9365a55580524b05bd5819d176b94f820ee9ac33562e9c2c53 not found: ID does not exist" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.878897 4954 scope.go:117] "RemoveContainer" containerID="563952950c0ef2198124863da9eaa76245ba156772af525f32bf5e0e96f2a1ad" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.894550 4954 scope.go:117] "RemoveContainer" containerID="563952950c0ef2198124863da9eaa76245ba156772af525f32bf5e0e96f2a1ad" Nov 28 16:16:30 crc kubenswrapper[4954]: E1128 16:16:30.896669 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"563952950c0ef2198124863da9eaa76245ba156772af525f32bf5e0e96f2a1ad\": container with ID starting with 563952950c0ef2198124863da9eaa76245ba156772af525f32bf5e0e96f2a1ad not found: ID does not exist" containerID="563952950c0ef2198124863da9eaa76245ba156772af525f32bf5e0e96f2a1ad" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.896706 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"563952950c0ef2198124863da9eaa76245ba156772af525f32bf5e0e96f2a1ad"} err="failed to get container status 
\"563952950c0ef2198124863da9eaa76245ba156772af525f32bf5e0e96f2a1ad\": rpc error: code = NotFound desc = could not find container \"563952950c0ef2198124863da9eaa76245ba156772af525f32bf5e0e96f2a1ad\": container with ID starting with 563952950c0ef2198124863da9eaa76245ba156772af525f32bf5e0e96f2a1ad not found: ID does not exist" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.896731 4954 scope.go:117] "RemoveContainer" containerID="2f17e7476426a09fd2e70814d4939417f16b93cd28187737a1104367b78e0da2" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.913444 4954 scope.go:117] "RemoveContainer" containerID="9d253fa558d47073550c892d5af409def0bc5659decc52cc7f1d13241174113c" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.914768 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-77lzc"] Nov 28 16:16:30 crc kubenswrapper[4954]: W1128 16:16:30.920340 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podea94ca81_4305_4775_bfc2_a7ce2a7f47b9.slice/crio-74027c60bd9f6702b953a32711cf7b7d30b457b74765b7d1ed7df13288c42f0b WatchSource:0}: Error finding container 74027c60bd9f6702b953a32711cf7b7d30b457b74765b7d1ed7df13288c42f0b: Status 404 returned error can't find the container with id 74027c60bd9f6702b953a32711cf7b7d30b457b74765b7d1ed7df13288c42f0b Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.933060 4954 scope.go:117] "RemoveContainer" containerID="b94e83accae0ecb4444afbbf529a677de315b2e218c45bd542c04676b54e1f48" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.955918 4954 scope.go:117] "RemoveContainer" containerID="2f17e7476426a09fd2e70814d4939417f16b93cd28187737a1104367b78e0da2" Nov 28 16:16:30 crc kubenswrapper[4954]: E1128 16:16:30.956726 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f17e7476426a09fd2e70814d4939417f16b93cd28187737a1104367b78e0da2\": container with ID starting with 2f17e7476426a09fd2e70814d4939417f16b93cd28187737a1104367b78e0da2 not found: ID does not exist" containerID="2f17e7476426a09fd2e70814d4939417f16b93cd28187737a1104367b78e0da2" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.956822 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f17e7476426a09fd2e70814d4939417f16b93cd28187737a1104367b78e0da2"} err="failed to get container status \"2f17e7476426a09fd2e70814d4939417f16b93cd28187737a1104367b78e0da2\": rpc error: code = NotFound desc = could not find container \"2f17e7476426a09fd2e70814d4939417f16b93cd28187737a1104367b78e0da2\": container with ID starting with 2f17e7476426a09fd2e70814d4939417f16b93cd28187737a1104367b78e0da2 not found: ID does not exist" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.956904 4954 scope.go:117] "RemoveContainer" containerID="9d253fa558d47073550c892d5af409def0bc5659decc52cc7f1d13241174113c" Nov 28 16:16:30 crc kubenswrapper[4954]: E1128 16:16:30.957446 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d253fa558d47073550c892d5af409def0bc5659decc52cc7f1d13241174113c\": container with ID starting with 9d253fa558d47073550c892d5af409def0bc5659decc52cc7f1d13241174113c not found: ID does not exist" containerID="9d253fa558d47073550c892d5af409def0bc5659decc52cc7f1d13241174113c" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.957477 4954 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d253fa558d47073550c892d5af409def0bc5659decc52cc7f1d13241174113c"} err="failed to get container status \"9d253fa558d47073550c892d5af409def0bc5659decc52cc7f1d13241174113c\": rpc error: code = NotFound desc = could not find container \"9d253fa558d47073550c892d5af409def0bc5659decc52cc7f1d13241174113c\": container with ID starting with 9d253fa558d47073550c892d5af409def0bc5659decc52cc7f1d13241174113c not found: ID does not exist" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.957498 4954 scope.go:117] "RemoveContainer" containerID="b94e83accae0ecb4444afbbf529a677de315b2e218c45bd542c04676b54e1f48" Nov 28 16:16:30 crc kubenswrapper[4954]: E1128 16:16:30.963643 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b94e83accae0ecb4444afbbf529a677de315b2e218c45bd542c04676b54e1f48\": container with ID starting with b94e83accae0ecb4444afbbf529a677de315b2e218c45bd542c04676b54e1f48 not found: ID does not exist" containerID="b94e83accae0ecb4444afbbf529a677de315b2e218c45bd542c04676b54e1f48" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.963738 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b94e83accae0ecb4444afbbf529a677de315b2e218c45bd542c04676b54e1f48"} err="failed to get container status \"b94e83accae0ecb4444afbbf529a677de315b2e218c45bd542c04676b54e1f48\": rpc error: code = NotFound desc = could not find container \"b94e83accae0ecb4444afbbf529a677de315b2e218c45bd542c04676b54e1f48\": container with ID starting with b94e83accae0ecb4444afbbf529a677de315b2e218c45bd542c04676b54e1f48 not found: ID does not exist" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.963806 4954 scope.go:117] "RemoveContainer" containerID="cf5d28b0fb429b471046c78c76a6091f87a20188849b00ffd15517c8ba9c55cd" Nov 28 16:16:30 crc kubenswrapper[4954]: I1128 16:16:30.993662 4954 scope.go:117] "RemoveContainer" containerID="dbf5acda2d49562fbf64c83f3441df2c864a668c93b66f0ec2f5d7ff076d4f5d" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.020895 4954 scope.go:117] "RemoveContainer" containerID="07b6cc6da14036ef4f80c51c80e5a97e3802cd23676390baf41227a2e4f78500" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.032703 4954 scope.go:117] "RemoveContainer" containerID="cf5d28b0fb429b471046c78c76a6091f87a20188849b00ffd15517c8ba9c55cd" Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.033154 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf5d28b0fb429b471046c78c76a6091f87a20188849b00ffd15517c8ba9c55cd\": container with ID starting with cf5d28b0fb429b471046c78c76a6091f87a20188849b00ffd15517c8ba9c55cd not found: ID does not exist" containerID="cf5d28b0fb429b471046c78c76a6091f87a20188849b00ffd15517c8ba9c55cd" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.033197 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf5d28b0fb429b471046c78c76a6091f87a20188849b00ffd15517c8ba9c55cd"} err="failed to get container status \"cf5d28b0fb429b471046c78c76a6091f87a20188849b00ffd15517c8ba9c55cd\": rpc error: code = NotFound desc = could not find container \"cf5d28b0fb429b471046c78c76a6091f87a20188849b00ffd15517c8ba9c55cd\": container with ID starting with cf5d28b0fb429b471046c78c76a6091f87a20188849b00ffd15517c8ba9c55cd not found: ID does not exist" Nov 28 16:16:31 
crc kubenswrapper[4954]: I1128 16:16:31.033225 4954 scope.go:117] "RemoveContainer" containerID="dbf5acda2d49562fbf64c83f3441df2c864a668c93b66f0ec2f5d7ff076d4f5d" Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.033643 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbf5acda2d49562fbf64c83f3441df2c864a668c93b66f0ec2f5d7ff076d4f5d\": container with ID starting with dbf5acda2d49562fbf64c83f3441df2c864a668c93b66f0ec2f5d7ff076d4f5d not found: ID does not exist" containerID="dbf5acda2d49562fbf64c83f3441df2c864a668c93b66f0ec2f5d7ff076d4f5d" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.033687 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbf5acda2d49562fbf64c83f3441df2c864a668c93b66f0ec2f5d7ff076d4f5d"} err="failed to get container status \"dbf5acda2d49562fbf64c83f3441df2c864a668c93b66f0ec2f5d7ff076d4f5d\": rpc error: code = NotFound desc = could not find container \"dbf5acda2d49562fbf64c83f3441df2c864a668c93b66f0ec2f5d7ff076d4f5d\": container with ID starting with dbf5acda2d49562fbf64c83f3441df2c864a668c93b66f0ec2f5d7ff076d4f5d not found: ID does not exist" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.033714 4954 scope.go:117] "RemoveContainer" containerID="07b6cc6da14036ef4f80c51c80e5a97e3802cd23676390baf41227a2e4f78500" Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.034168 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07b6cc6da14036ef4f80c51c80e5a97e3802cd23676390baf41227a2e4f78500\": container with ID starting with 07b6cc6da14036ef4f80c51c80e5a97e3802cd23676390baf41227a2e4f78500 not found: ID does not exist" containerID="07b6cc6da14036ef4f80c51c80e5a97e3802cd23676390baf41227a2e4f78500" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.034204 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07b6cc6da14036ef4f80c51c80e5a97e3802cd23676390baf41227a2e4f78500"} err="failed to get container status \"07b6cc6da14036ef4f80c51c80e5a97e3802cd23676390baf41227a2e4f78500\": rpc error: code = NotFound desc = could not find container \"07b6cc6da14036ef4f80c51c80e5a97e3802cd23676390baf41227a2e4f78500\": container with ID starting with 07b6cc6da14036ef4f80c51c80e5a97e3802cd23676390baf41227a2e4f78500 not found: ID does not exist" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.131123 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xrnc2"] Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.140270 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xrnc2"] Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.153021 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tw7lr"] Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.157341 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tw7lr"] Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.163457 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8zxgp"] Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.170579 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8zxgp"] Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.176906 
4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f5sl2"] Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.179913 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-f5sl2"] Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.812147 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-77lzc" event={"ID":"ea94ca81-4305-4775-bfc2-a7ce2a7f47b9","Type":"ContainerStarted","Data":"8282bc2d7e84673f443e92c7bcebf60ec12693f88f18e0751bf8930cb1c7d837"} Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.812415 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-77lzc" event={"ID":"ea94ca81-4305-4775-bfc2-a7ce2a7f47b9","Type":"ContainerStarted","Data":"74027c60bd9f6702b953a32711cf7b7d30b457b74765b7d1ed7df13288c42f0b"} Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.812474 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-77lzc" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.831138 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-77lzc" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.853272 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-77lzc" podStartSLOduration=2.853248889 podStartE2EDuration="2.853248889s" podCreationTimestamp="2025-11-28 16:16:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:16:31.83259217 +0000 UTC m=+345.224260782" watchObservedRunningTime="2025-11-28 16:16:31.853248889 +0000 UTC m=+345.244917440" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.865267 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0563a539-f9ae-4640-a7f2-68027936fe45" path="/var/lib/kubelet/pods/0563a539-f9ae-4640-a7f2-68027936fe45/volumes" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.866107 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a39712c-ebc2-4a9a-8205-537d4d00fae7" path="/var/lib/kubelet/pods/0a39712c-ebc2-4a9a-8205-537d4d00fae7/volumes" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.866673 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" path="/var/lib/kubelet/pods/4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b/volumes" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.868572 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="500e43fe-7466-45bd-ab6c-9f357da02385" path="/var/lib/kubelet/pods/500e43fe-7466-45bd-ab6c-9f357da02385/volumes" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.869321 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e51474f-c520-42db-ad78-42f143642a7e" path="/var/lib/kubelet/pods/9e51474f-c520-42db-ad78-42f143642a7e/volumes" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.953769 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-n2lg2"] Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.954010 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="500e43fe-7466-45bd-ab6c-9f357da02385" 
containerName="extract-utilities" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954022 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="500e43fe-7466-45bd-ab6c-9f357da02385" containerName="extract-utilities" Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.954029 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0563a539-f9ae-4640-a7f2-68027936fe45" containerName="extract-content" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954037 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="0563a539-f9ae-4640-a7f2-68027936fe45" containerName="extract-content" Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.954047 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="500e43fe-7466-45bd-ab6c-9f357da02385" containerName="registry-server" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954053 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="500e43fe-7466-45bd-ab6c-9f357da02385" containerName="registry-server" Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.954065 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0563a539-f9ae-4640-a7f2-68027936fe45" containerName="registry-server" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954070 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="0563a539-f9ae-4640-a7f2-68027936fe45" containerName="registry-server" Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.954082 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" containerName="registry-server" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954087 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" containerName="registry-server" Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.954095 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0563a539-f9ae-4640-a7f2-68027936fe45" containerName="extract-utilities" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954100 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="0563a539-f9ae-4640-a7f2-68027936fe45" containerName="extract-utilities" Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.954106 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a39712c-ebc2-4a9a-8205-537d4d00fae7" containerName="extract-utilities" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954111 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a39712c-ebc2-4a9a-8205-537d4d00fae7" containerName="extract-utilities" Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.954119 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a39712c-ebc2-4a9a-8205-537d4d00fae7" containerName="extract-content" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954125 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a39712c-ebc2-4a9a-8205-537d4d00fae7" containerName="extract-content" Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.954133 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" containerName="extract-content" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954139 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" containerName="extract-content" Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.954146 4954 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="500e43fe-7466-45bd-ab6c-9f357da02385" containerName="extract-content" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954152 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="500e43fe-7466-45bd-ab6c-9f357da02385" containerName="extract-content" Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.954160 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a39712c-ebc2-4a9a-8205-537d4d00fae7" containerName="registry-server" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954166 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a39712c-ebc2-4a9a-8205-537d4d00fae7" containerName="registry-server" Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.954173 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" containerName="extract-utilities" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954180 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" containerName="extract-utilities" Nov 28 16:16:31 crc kubenswrapper[4954]: E1128 16:16:31.954190 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e51474f-c520-42db-ad78-42f143642a7e" containerName="marketplace-operator" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954196 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e51474f-c520-42db-ad78-42f143642a7e" containerName="marketplace-operator" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954297 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="500e43fe-7466-45bd-ab6c-9f357da02385" containerName="registry-server" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954308 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="0563a539-f9ae-4640-a7f2-68027936fe45" containerName="registry-server" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954317 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="4af7e770-94dd-4b2f-bb8d-c6d49ac9b39b" containerName="registry-server" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954325 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e51474f-c520-42db-ad78-42f143642a7e" containerName="marketplace-operator" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.954333 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a39712c-ebc2-4a9a-8205-537d4d00fae7" containerName="registry-server" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.955057 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.959215 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.963676 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n2lg2"] Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.986466 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-utilities\") pod \"redhat-marketplace-n2lg2\" (UID: \"ae51cec8-1ee7-4bb7-bc2b-09763a11aada\") " pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.986701 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-catalog-content\") pod \"redhat-marketplace-n2lg2\" (UID: \"ae51cec8-1ee7-4bb7-bc2b-09763a11aada\") " pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 16:16:31 crc kubenswrapper[4954]: I1128 16:16:31.986819 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljwsn\" (UniqueName: \"kubernetes.io/projected/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-kube-api-access-ljwsn\") pod \"redhat-marketplace-n2lg2\" (UID: \"ae51cec8-1ee7-4bb7-bc2b-09763a11aada\") " pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.088604 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-catalog-content\") pod \"redhat-marketplace-n2lg2\" (UID: \"ae51cec8-1ee7-4bb7-bc2b-09763a11aada\") " pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.088666 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljwsn\" (UniqueName: \"kubernetes.io/projected/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-kube-api-access-ljwsn\") pod \"redhat-marketplace-n2lg2\" (UID: \"ae51cec8-1ee7-4bb7-bc2b-09763a11aada\") " pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.088693 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-utilities\") pod \"redhat-marketplace-n2lg2\" (UID: \"ae51cec8-1ee7-4bb7-bc2b-09763a11aada\") " pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.089206 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-utilities\") pod \"redhat-marketplace-n2lg2\" (UID: \"ae51cec8-1ee7-4bb7-bc2b-09763a11aada\") " pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.089500 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-catalog-content\") pod \"redhat-marketplace-n2lg2\" (UID: 
\"ae51cec8-1ee7-4bb7-bc2b-09763a11aada\") " pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.116330 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljwsn\" (UniqueName: \"kubernetes.io/projected/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-kube-api-access-ljwsn\") pod \"redhat-marketplace-n2lg2\" (UID: \"ae51cec8-1ee7-4bb7-bc2b-09763a11aada\") " pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.144832 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-sjfwm"] Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.145782 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sjfwm" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.149171 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.158102 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sjfwm"] Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.189368 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfe853ea-55a1-4d06-a731-005c0bb33e7c-utilities\") pod \"redhat-operators-sjfwm\" (UID: \"dfe853ea-55a1-4d06-a731-005c0bb33e7c\") " pod="openshift-marketplace/redhat-operators-sjfwm" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.189433 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tp7q8\" (UniqueName: \"kubernetes.io/projected/dfe853ea-55a1-4d06-a731-005c0bb33e7c-kube-api-access-tp7q8\") pod \"redhat-operators-sjfwm\" (UID: \"dfe853ea-55a1-4d06-a731-005c0bb33e7c\") " pod="openshift-marketplace/redhat-operators-sjfwm" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.189468 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfe853ea-55a1-4d06-a731-005c0bb33e7c-catalog-content\") pod \"redhat-operators-sjfwm\" (UID: \"dfe853ea-55a1-4d06-a731-005c0bb33e7c\") " pod="openshift-marketplace/redhat-operators-sjfwm" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.281438 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.290111 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tp7q8\" (UniqueName: \"kubernetes.io/projected/dfe853ea-55a1-4d06-a731-005c0bb33e7c-kube-api-access-tp7q8\") pod \"redhat-operators-sjfwm\" (UID: \"dfe853ea-55a1-4d06-a731-005c0bb33e7c\") " pod="openshift-marketplace/redhat-operators-sjfwm" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.290156 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfe853ea-55a1-4d06-a731-005c0bb33e7c-catalog-content\") pod \"redhat-operators-sjfwm\" (UID: \"dfe853ea-55a1-4d06-a731-005c0bb33e7c\") " pod="openshift-marketplace/redhat-operators-sjfwm" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.290195 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfe853ea-55a1-4d06-a731-005c0bb33e7c-utilities\") pod \"redhat-operators-sjfwm\" (UID: \"dfe853ea-55a1-4d06-a731-005c0bb33e7c\") " pod="openshift-marketplace/redhat-operators-sjfwm" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.290563 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfe853ea-55a1-4d06-a731-005c0bb33e7c-utilities\") pod \"redhat-operators-sjfwm\" (UID: \"dfe853ea-55a1-4d06-a731-005c0bb33e7c\") " pod="openshift-marketplace/redhat-operators-sjfwm" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.290632 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfe853ea-55a1-4d06-a731-005c0bb33e7c-catalog-content\") pod \"redhat-operators-sjfwm\" (UID: \"dfe853ea-55a1-4d06-a731-005c0bb33e7c\") " pod="openshift-marketplace/redhat-operators-sjfwm" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.311052 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tp7q8\" (UniqueName: \"kubernetes.io/projected/dfe853ea-55a1-4d06-a731-005c0bb33e7c-kube-api-access-tp7q8\") pod \"redhat-operators-sjfwm\" (UID: \"dfe853ea-55a1-4d06-a731-005c0bb33e7c\") " pod="openshift-marketplace/redhat-operators-sjfwm" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.466021 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-sjfwm" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.480644 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.480693 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.709615 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n2lg2"] Nov 28 16:16:32 crc kubenswrapper[4954]: W1128 16:16:32.721881 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae51cec8_1ee7_4bb7_bc2b_09763a11aada.slice/crio-6dd999537dc423e0548308ed188aa4f7d55085e3d6da31528bc0324173aab1d4 WatchSource:0}: Error finding container 6dd999537dc423e0548308ed188aa4f7d55085e3d6da31528bc0324173aab1d4: Status 404 returned error can't find the container with id 6dd999537dc423e0548308ed188aa4f7d55085e3d6da31528bc0324173aab1d4 Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.831225 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n2lg2" event={"ID":"ae51cec8-1ee7-4bb7-bc2b-09763a11aada","Type":"ContainerStarted","Data":"6dd999537dc423e0548308ed188aa4f7d55085e3d6da31528bc0324173aab1d4"} Nov 28 16:16:32 crc kubenswrapper[4954]: I1128 16:16:32.842876 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sjfwm"] Nov 28 16:16:32 crc kubenswrapper[4954]: W1128 16:16:32.846636 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddfe853ea_55a1_4d06_a731_005c0bb33e7c.slice/crio-46574dea97ca929c7dd7de63ba2ca385888a064cb4c4efa22fc5aecd877d7859 WatchSource:0}: Error finding container 46574dea97ca929c7dd7de63ba2ca385888a064cb4c4efa22fc5aecd877d7859: Status 404 returned error can't find the container with id 46574dea97ca929c7dd7de63ba2ca385888a064cb4c4efa22fc5aecd877d7859 Nov 28 16:16:33 crc kubenswrapper[4954]: I1128 16:16:33.837517 4954 generic.go:334] "Generic (PLEG): container finished" podID="dfe853ea-55a1-4d06-a731-005c0bb33e7c" containerID="be612cb1e97d70004bbfca9c88a3aecec10f9eebbb5f518c1d1685765ad3228d" exitCode=0 Nov 28 16:16:33 crc kubenswrapper[4954]: I1128 16:16:33.837687 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sjfwm" event={"ID":"dfe853ea-55a1-4d06-a731-005c0bb33e7c","Type":"ContainerDied","Data":"be612cb1e97d70004bbfca9c88a3aecec10f9eebbb5f518c1d1685765ad3228d"} Nov 28 16:16:33 crc kubenswrapper[4954]: I1128 16:16:33.838115 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sjfwm" event={"ID":"dfe853ea-55a1-4d06-a731-005c0bb33e7c","Type":"ContainerStarted","Data":"46574dea97ca929c7dd7de63ba2ca385888a064cb4c4efa22fc5aecd877d7859"} Nov 28 16:16:33 crc kubenswrapper[4954]: I1128 16:16:33.839721 4954 generic.go:334] "Generic 
(PLEG): container finished" podID="ae51cec8-1ee7-4bb7-bc2b-09763a11aada" containerID="02b1de4b73a1192bb7c1d50f93b16b3cf7d70e5b59837d499c7254c4c0a57924" exitCode=0 Nov 28 16:16:33 crc kubenswrapper[4954]: I1128 16:16:33.840487 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n2lg2" event={"ID":"ae51cec8-1ee7-4bb7-bc2b-09763a11aada","Type":"ContainerDied","Data":"02b1de4b73a1192bb7c1d50f93b16b3cf7d70e5b59837d499c7254c4c0a57924"} Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.549065 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wxnll"] Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.555971 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wxnll" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.556824 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wxnll"] Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.558830 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.622045 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-catalog-content\") pod \"community-operators-wxnll\" (UID: \"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec\") " pod="openshift-marketplace/community-operators-wxnll" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.622116 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-utilities\") pod \"community-operators-wxnll\" (UID: \"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec\") " pod="openshift-marketplace/community-operators-wxnll" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.622135 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pgbn\" (UniqueName: \"kubernetes.io/projected/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-kube-api-access-8pgbn\") pod \"community-operators-wxnll\" (UID: \"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec\") " pod="openshift-marketplace/community-operators-wxnll" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.723335 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-catalog-content\") pod \"community-operators-wxnll\" (UID: \"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec\") " pod="openshift-marketplace/community-operators-wxnll" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.723418 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-utilities\") pod \"community-operators-wxnll\" (UID: \"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec\") " pod="openshift-marketplace/community-operators-wxnll" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.723444 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pgbn\" (UniqueName: \"kubernetes.io/projected/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-kube-api-access-8pgbn\") pod \"community-operators-wxnll\" (UID: 
\"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec\") " pod="openshift-marketplace/community-operators-wxnll" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.723905 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-catalog-content\") pod \"community-operators-wxnll\" (UID: \"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec\") " pod="openshift-marketplace/community-operators-wxnll" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.723941 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-utilities\") pod \"community-operators-wxnll\" (UID: \"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec\") " pod="openshift-marketplace/community-operators-wxnll" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.743009 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zzbvs"] Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.743939 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.746228 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pgbn\" (UniqueName: \"kubernetes.io/projected/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-kube-api-access-8pgbn\") pod \"community-operators-wxnll\" (UID: \"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec\") " pod="openshift-marketplace/community-operators-wxnll" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.748230 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.773981 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zzbvs"] Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.824906 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsf7b\" (UniqueName: \"kubernetes.io/projected/a8c83942-d8fd-4b50-b873-df962c118505-kube-api-access-jsf7b\") pod \"certified-operators-zzbvs\" (UID: \"a8c83942-d8fd-4b50-b873-df962c118505\") " pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.824989 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8c83942-d8fd-4b50-b873-df962c118505-utilities\") pod \"certified-operators-zzbvs\" (UID: \"a8c83942-d8fd-4b50-b873-df962c118505\") " pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.825039 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8c83942-d8fd-4b50-b873-df962c118505-catalog-content\") pod \"certified-operators-zzbvs\" (UID: \"a8c83942-d8fd-4b50-b873-df962c118505\") " pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.903667 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wxnll" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.925725 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsf7b\" (UniqueName: \"kubernetes.io/projected/a8c83942-d8fd-4b50-b873-df962c118505-kube-api-access-jsf7b\") pod \"certified-operators-zzbvs\" (UID: \"a8c83942-d8fd-4b50-b873-df962c118505\") " pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.925862 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8c83942-d8fd-4b50-b873-df962c118505-utilities\") pod \"certified-operators-zzbvs\" (UID: \"a8c83942-d8fd-4b50-b873-df962c118505\") " pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.925963 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8c83942-d8fd-4b50-b873-df962c118505-catalog-content\") pod \"certified-operators-zzbvs\" (UID: \"a8c83942-d8fd-4b50-b873-df962c118505\") " pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.928701 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8c83942-d8fd-4b50-b873-df962c118505-utilities\") pod \"certified-operators-zzbvs\" (UID: \"a8c83942-d8fd-4b50-b873-df962c118505\") " pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.928742 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8c83942-d8fd-4b50-b873-df962c118505-catalog-content\") pod \"certified-operators-zzbvs\" (UID: \"a8c83942-d8fd-4b50-b873-df962c118505\") " pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 16:16:34 crc kubenswrapper[4954]: I1128 16:16:34.960475 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsf7b\" (UniqueName: \"kubernetes.io/projected/a8c83942-d8fd-4b50-b873-df962c118505-kube-api-access-jsf7b\") pod \"certified-operators-zzbvs\" (UID: \"a8c83942-d8fd-4b50-b873-df962c118505\") " pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 16:16:35 crc kubenswrapper[4954]: I1128 16:16:35.106434 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 16:16:35 crc kubenswrapper[4954]: I1128 16:16:35.349325 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wxnll"] Nov 28 16:16:35 crc kubenswrapper[4954]: W1128 16:16:35.375724 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b4725ee_80be_4d4e_b7b1_f1e0ac4ab0ec.slice/crio-66c0f089af2a208d2d8fe9a591e4e6d8406bbbdcb22c5078a4354968d7d4a191 WatchSource:0}: Error finding container 66c0f089af2a208d2d8fe9a591e4e6d8406bbbdcb22c5078a4354968d7d4a191: Status 404 returned error can't find the container with id 66c0f089af2a208d2d8fe9a591e4e6d8406bbbdcb22c5078a4354968d7d4a191 Nov 28 16:16:35 crc kubenswrapper[4954]: I1128 16:16:35.499067 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zzbvs"] Nov 28 16:16:35 crc kubenswrapper[4954]: W1128 16:16:35.566248 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda8c83942_d8fd_4b50_b873_df962c118505.slice/crio-3a696fbc2e150be5b0d93fe3bdc932c45c1ccb711d0f87222ef94bdb73f00f81 WatchSource:0}: Error finding container 3a696fbc2e150be5b0d93fe3bdc932c45c1ccb711d0f87222ef94bdb73f00f81: Status 404 returned error can't find the container with id 3a696fbc2e150be5b0d93fe3bdc932c45c1ccb711d0f87222ef94bdb73f00f81 Nov 28 16:16:35 crc kubenswrapper[4954]: I1128 16:16:35.851587 4954 generic.go:334] "Generic (PLEG): container finished" podID="8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec" containerID="f77874abe0e0d6912b9bfc67dce7510ecfadb53c35346dd4845d3402c48e4965" exitCode=0 Nov 28 16:16:35 crc kubenswrapper[4954]: I1128 16:16:35.851915 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxnll" event={"ID":"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec","Type":"ContainerDied","Data":"f77874abe0e0d6912b9bfc67dce7510ecfadb53c35346dd4845d3402c48e4965"} Nov 28 16:16:35 crc kubenswrapper[4954]: I1128 16:16:35.851943 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxnll" event={"ID":"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec","Type":"ContainerStarted","Data":"66c0f089af2a208d2d8fe9a591e4e6d8406bbbdcb22c5078a4354968d7d4a191"} Nov 28 16:16:35 crc kubenswrapper[4954]: I1128 16:16:35.854228 4954 generic.go:334] "Generic (PLEG): container finished" podID="a8c83942-d8fd-4b50-b873-df962c118505" containerID="fb862f1558d8de081470ac882ee3e8d62f693084d625134db0d02eae91158ca0" exitCode=0 Nov 28 16:16:35 crc kubenswrapper[4954]: I1128 16:16:35.854692 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zzbvs" event={"ID":"a8c83942-d8fd-4b50-b873-df962c118505","Type":"ContainerDied","Data":"fb862f1558d8de081470ac882ee3e8d62f693084d625134db0d02eae91158ca0"} Nov 28 16:16:35 crc kubenswrapper[4954]: I1128 16:16:35.854732 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zzbvs" event={"ID":"a8c83942-d8fd-4b50-b873-df962c118505","Type":"ContainerStarted","Data":"3a696fbc2e150be5b0d93fe3bdc932c45c1ccb711d0f87222ef94bdb73f00f81"} Nov 28 16:16:35 crc kubenswrapper[4954]: I1128 16:16:35.861072 4954 generic.go:334] "Generic (PLEG): container finished" podID="ae51cec8-1ee7-4bb7-bc2b-09763a11aada" containerID="883fde5c60726c48ca91ade0cea17b842a8fd013c82a67f28930ab8e34224d1e" 
exitCode=0 Nov 28 16:16:35 crc kubenswrapper[4954]: I1128 16:16:35.865831 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n2lg2" event={"ID":"ae51cec8-1ee7-4bb7-bc2b-09763a11aada","Type":"ContainerDied","Data":"883fde5c60726c48ca91ade0cea17b842a8fd013c82a67f28930ab8e34224d1e"} Nov 28 16:16:35 crc kubenswrapper[4954]: I1128 16:16:35.865868 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sjfwm" event={"ID":"dfe853ea-55a1-4d06-a731-005c0bb33e7c","Type":"ContainerStarted","Data":"25e4d01058fec80f9ab9708c99bbc6acbad9049c42e3bd1a03e0cf8b38b1a10b"} Nov 28 16:16:36 crc kubenswrapper[4954]: I1128 16:16:36.871404 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n2lg2" event={"ID":"ae51cec8-1ee7-4bb7-bc2b-09763a11aada","Type":"ContainerStarted","Data":"70a2eae1934b41c3d4516e6c76a77e4aa553399624c925f7a3bd6c885791df47"} Nov 28 16:16:36 crc kubenswrapper[4954]: I1128 16:16:36.875839 4954 generic.go:334] "Generic (PLEG): container finished" podID="dfe853ea-55a1-4d06-a731-005c0bb33e7c" containerID="25e4d01058fec80f9ab9708c99bbc6acbad9049c42e3bd1a03e0cf8b38b1a10b" exitCode=0 Nov 28 16:16:36 crc kubenswrapper[4954]: I1128 16:16:36.875883 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sjfwm" event={"ID":"dfe853ea-55a1-4d06-a731-005c0bb33e7c","Type":"ContainerDied","Data":"25e4d01058fec80f9ab9708c99bbc6acbad9049c42e3bd1a03e0cf8b38b1a10b"} Nov 28 16:16:36 crc kubenswrapper[4954]: I1128 16:16:36.897687 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-n2lg2" podStartSLOduration=3.059430104 podStartE2EDuration="5.8976644s" podCreationTimestamp="2025-11-28 16:16:31 +0000 UTC" firstStartedPulling="2025-11-28 16:16:33.841586131 +0000 UTC m=+347.233254702" lastFinishedPulling="2025-11-28 16:16:36.679820457 +0000 UTC m=+350.071488998" observedRunningTime="2025-11-28 16:16:36.893242443 +0000 UTC m=+350.284911014" watchObservedRunningTime="2025-11-28 16:16:36.8976644 +0000 UTC m=+350.289332951" Nov 28 16:16:37 crc kubenswrapper[4954]: I1128 16:16:37.894959 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sjfwm" event={"ID":"dfe853ea-55a1-4d06-a731-005c0bb33e7c","Type":"ContainerStarted","Data":"dab03f4a69904207ca23cb4a9c93b27519da3bfc8b806b2362f15c6d20d79976"} Nov 28 16:16:37 crc kubenswrapper[4954]: I1128 16:16:37.899864 4954 generic.go:334] "Generic (PLEG): container finished" podID="a8c83942-d8fd-4b50-b873-df962c118505" containerID="8499b7ad8a61fc1553e04ae7a045270af04df252722b02f43fb3d076cf9e45d9" exitCode=0 Nov 28 16:16:37 crc kubenswrapper[4954]: I1128 16:16:37.900614 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zzbvs" event={"ID":"a8c83942-d8fd-4b50-b873-df962c118505","Type":"ContainerDied","Data":"8499b7ad8a61fc1553e04ae7a045270af04df252722b02f43fb3d076cf9e45d9"} Nov 28 16:16:37 crc kubenswrapper[4954]: I1128 16:16:37.914700 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-sjfwm" podStartSLOduration=2.271041343 podStartE2EDuration="5.914681627s" podCreationTimestamp="2025-11-28 16:16:32 +0000 UTC" firstStartedPulling="2025-11-28 16:16:33.839630831 +0000 UTC m=+347.231299372" lastFinishedPulling="2025-11-28 16:16:37.483271115 +0000 UTC m=+350.874939656" 
observedRunningTime="2025-11-28 16:16:37.912356195 +0000 UTC m=+351.304024736" watchObservedRunningTime="2025-11-28 16:16:37.914681627 +0000 UTC m=+351.306350178" Nov 28 16:16:39 crc kubenswrapper[4954]: I1128 16:16:39.755063 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-kl4j2" Nov 28 16:16:39 crc kubenswrapper[4954]: I1128 16:16:39.805789 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-w96mt"] Nov 28 16:16:39 crc kubenswrapper[4954]: I1128 16:16:39.912333 4954 generic.go:334] "Generic (PLEG): container finished" podID="8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec" containerID="22c04f03a906a58c724ce5e7fdd51294c602526dd4eccca7a89b58541155a247" exitCode=0 Nov 28 16:16:39 crc kubenswrapper[4954]: I1128 16:16:39.912404 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxnll" event={"ID":"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec","Type":"ContainerDied","Data":"22c04f03a906a58c724ce5e7fdd51294c602526dd4eccca7a89b58541155a247"} Nov 28 16:16:39 crc kubenswrapper[4954]: I1128 16:16:39.917207 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zzbvs" event={"ID":"a8c83942-d8fd-4b50-b873-df962c118505","Type":"ContainerStarted","Data":"2a36a6ef09a5b475572e05a42bc3aa4f263f00ab339c6fe803b683e9b0c2f1e9"} Nov 28 16:16:39 crc kubenswrapper[4954]: I1128 16:16:39.957583 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zzbvs" podStartSLOduration=3.2056647480000002 podStartE2EDuration="5.957565236s" podCreationTimestamp="2025-11-28 16:16:34 +0000 UTC" firstStartedPulling="2025-11-28 16:16:35.855359741 +0000 UTC m=+349.247028282" lastFinishedPulling="2025-11-28 16:16:38.607260219 +0000 UTC m=+351.998928770" observedRunningTime="2025-11-28 16:16:39.952693995 +0000 UTC m=+353.344362546" watchObservedRunningTime="2025-11-28 16:16:39.957565236 +0000 UTC m=+353.349233787" Nov 28 16:16:40 crc kubenswrapper[4954]: I1128 16:16:40.926931 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxnll" event={"ID":"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec","Type":"ContainerStarted","Data":"6835f72240ac625d81eb99af7f9bf97ade1c2dc307129c9df59ef1efe5a36d55"} Nov 28 16:16:40 crc kubenswrapper[4954]: I1128 16:16:40.950067 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wxnll" podStartSLOduration=2.423669092 podStartE2EDuration="6.950043015s" podCreationTimestamp="2025-11-28 16:16:34 +0000 UTC" firstStartedPulling="2025-11-28 16:16:35.853274436 +0000 UTC m=+349.244942977" lastFinishedPulling="2025-11-28 16:16:40.379648359 +0000 UTC m=+353.771316900" observedRunningTime="2025-11-28 16:16:40.946166466 +0000 UTC m=+354.337835007" watchObservedRunningTime="2025-11-28 16:16:40.950043015 +0000 UTC m=+354.341711566" Nov 28 16:16:42 crc kubenswrapper[4954]: I1128 16:16:42.282092 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 16:16:42 crc kubenswrapper[4954]: I1128 16:16:42.282451 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 16:16:42 crc kubenswrapper[4954]: I1128 16:16:42.321989 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 16:16:42 crc kubenswrapper[4954]: I1128 16:16:42.466853 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-sjfwm" Nov 28 16:16:42 crc kubenswrapper[4954]: I1128 16:16:42.466906 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-sjfwm" Nov 28 16:16:42 crc kubenswrapper[4954]: I1128 16:16:42.988391 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 16:16:43 crc kubenswrapper[4954]: I1128 16:16:43.515849 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-sjfwm" podUID="dfe853ea-55a1-4d06-a731-005c0bb33e7c" containerName="registry-server" probeResult="failure" output=< Nov 28 16:16:43 crc kubenswrapper[4954]: timeout: failed to connect service ":50051" within 1s Nov 28 16:16:43 crc kubenswrapper[4954]: > Nov 28 16:16:44 crc kubenswrapper[4954]: I1128 16:16:44.903976 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wxnll" Nov 28 16:16:44 crc kubenswrapper[4954]: I1128 16:16:44.904037 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wxnll" Nov 28 16:16:44 crc kubenswrapper[4954]: I1128 16:16:44.973629 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wxnll" Nov 28 16:16:45 crc kubenswrapper[4954]: I1128 16:16:45.107189 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 16:16:45 crc kubenswrapper[4954]: I1128 16:16:45.107449 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 16:16:45 crc kubenswrapper[4954]: I1128 16:16:45.169546 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 16:16:46 crc kubenswrapper[4954]: I1128 16:16:46.008146 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 16:16:52 crc kubenswrapper[4954]: I1128 16:16:52.528324 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-sjfwm" Nov 28 16:16:52 crc kubenswrapper[4954]: I1128 16:16:52.602831 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-sjfwm" Nov 28 16:16:54 crc kubenswrapper[4954]: I1128 16:16:54.957851 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wxnll" Nov 28 16:17:02 crc kubenswrapper[4954]: I1128 16:17:02.481350 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:17:02 crc kubenswrapper[4954]: I1128 16:17:02.482100 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:17:04 crc kubenswrapper[4954]: I1128 16:17:04.846375 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" podUID="d42a5371-0448-48d1-8df8-aee818ea6644" containerName="registry" containerID="cri-o://cef4a51e34bd09df4ec25fb5129bfacc5984f1b281439845e443316f516f16a7" gracePeriod=30 Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.073091 4954 generic.go:334] "Generic (PLEG): container finished" podID="d42a5371-0448-48d1-8df8-aee818ea6644" containerID="cef4a51e34bd09df4ec25fb5129bfacc5984f1b281439845e443316f516f16a7" exitCode=0 Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.073154 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" event={"ID":"d42a5371-0448-48d1-8df8-aee818ea6644","Type":"ContainerDied","Data":"cef4a51e34bd09df4ec25fb5129bfacc5984f1b281439845e443316f516f16a7"} Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.322374 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.442588 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54kfl\" (UniqueName: \"kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-kube-api-access-54kfl\") pod \"d42a5371-0448-48d1-8df8-aee818ea6644\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.442655 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-registry-tls\") pod \"d42a5371-0448-48d1-8df8-aee818ea6644\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.442766 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-bound-sa-token\") pod \"d42a5371-0448-48d1-8df8-aee818ea6644\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.442804 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d42a5371-0448-48d1-8df8-aee818ea6644-ca-trust-extracted\") pod \"d42a5371-0448-48d1-8df8-aee818ea6644\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.443767 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d42a5371-0448-48d1-8df8-aee818ea6644-registry-certificates\") pod \"d42a5371-0448-48d1-8df8-aee818ea6644\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.444040 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"d42a5371-0448-48d1-8df8-aee818ea6644\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.444097 4954 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d42a5371-0448-48d1-8df8-aee818ea6644-trusted-ca\") pod \"d42a5371-0448-48d1-8df8-aee818ea6644\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.444140 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d42a5371-0448-48d1-8df8-aee818ea6644-installation-pull-secrets\") pod \"d42a5371-0448-48d1-8df8-aee818ea6644\" (UID: \"d42a5371-0448-48d1-8df8-aee818ea6644\") " Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.444554 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d42a5371-0448-48d1-8df8-aee818ea6644-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "d42a5371-0448-48d1-8df8-aee818ea6644" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.445297 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d42a5371-0448-48d1-8df8-aee818ea6644-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "d42a5371-0448-48d1-8df8-aee818ea6644" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.451369 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d42a5371-0448-48d1-8df8-aee818ea6644-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "d42a5371-0448-48d1-8df8-aee818ea6644" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.451390 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "d42a5371-0448-48d1-8df8-aee818ea6644" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.452072 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "d42a5371-0448-48d1-8df8-aee818ea6644" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.455690 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-kube-api-access-54kfl" (OuterVolumeSpecName: "kube-api-access-54kfl") pod "d42a5371-0448-48d1-8df8-aee818ea6644" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644"). InnerVolumeSpecName "kube-api-access-54kfl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.458148 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "d42a5371-0448-48d1-8df8-aee818ea6644" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.481812 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d42a5371-0448-48d1-8df8-aee818ea6644-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "d42a5371-0448-48d1-8df8-aee818ea6644" (UID: "d42a5371-0448-48d1-8df8-aee818ea6644"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.546237 4954 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d42a5371-0448-48d1-8df8-aee818ea6644-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.546286 4954 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d42a5371-0448-48d1-8df8-aee818ea6644-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.546306 4954 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d42a5371-0448-48d1-8df8-aee818ea6644-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.546325 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54kfl\" (UniqueName: \"kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-kube-api-access-54kfl\") on node \"crc\" DevicePath \"\"" Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.546342 4954 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.546358 4954 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d42a5371-0448-48d1-8df8-aee818ea6644-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 16:17:05 crc kubenswrapper[4954]: I1128 16:17:05.546377 4954 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d42a5371-0448-48d1-8df8-aee818ea6644-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 28 16:17:06 crc kubenswrapper[4954]: I1128 16:17:06.082099 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" event={"ID":"d42a5371-0448-48d1-8df8-aee818ea6644","Type":"ContainerDied","Data":"89422d44d84e86fbcc87edf1ab0f1be1e731872f2807c5f0d4e3414f5de0b48e"} Nov 28 16:17:06 crc kubenswrapper[4954]: I1128 16:17:06.082165 4954 scope.go:117] "RemoveContainer" containerID="cef4a51e34bd09df4ec25fb5129bfacc5984f1b281439845e443316f516f16a7" Nov 28 16:17:06 crc kubenswrapper[4954]: I1128 16:17:06.082183 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-w96mt" Nov 28 16:17:06 crc kubenswrapper[4954]: I1128 16:17:06.115940 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-w96mt"] Nov 28 16:17:06 crc kubenswrapper[4954]: I1128 16:17:06.121472 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-w96mt"] Nov 28 16:17:07 crc kubenswrapper[4954]: I1128 16:17:07.867510 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d42a5371-0448-48d1-8df8-aee818ea6644" path="/var/lib/kubelet/pods/d42a5371-0448-48d1-8df8-aee818ea6644/volumes" Nov 28 16:17:32 crc kubenswrapper[4954]: I1128 16:17:32.480602 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:17:32 crc kubenswrapper[4954]: I1128 16:17:32.481407 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:17:32 crc kubenswrapper[4954]: I1128 16:17:32.481495 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:17:32 crc kubenswrapper[4954]: I1128 16:17:32.482440 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cb24ea3233adacc195fae8733d377a677bc6d6ac461d682e127e50d4c1e27874"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:17:32 crc kubenswrapper[4954]: I1128 16:17:32.482590 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://cb24ea3233adacc195fae8733d377a677bc6d6ac461d682e127e50d4c1e27874" gracePeriod=600 Nov 28 16:17:33 crc kubenswrapper[4954]: I1128 16:17:33.279130 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="cb24ea3233adacc195fae8733d377a677bc6d6ac461d682e127e50d4c1e27874" exitCode=0 Nov 28 16:17:33 crc kubenswrapper[4954]: I1128 16:17:33.279237 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"cb24ea3233adacc195fae8733d377a677bc6d6ac461d682e127e50d4c1e27874"} Nov 28 16:17:33 crc kubenswrapper[4954]: I1128 16:17:33.279597 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"c0ae34b57f26b8a5507b044e5a8e89b38ca24fbf6f3880b3c5bf367c012726c1"} Nov 28 16:17:33 crc kubenswrapper[4954]: I1128 16:17:33.279631 4954 scope.go:117] "RemoveContainer" 
containerID="741082937cc4557906629425d1d18a3af22eebf353dbdee98f37a7fb59f50046" Nov 28 16:19:32 crc kubenswrapper[4954]: I1128 16:19:32.481504 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:19:32 crc kubenswrapper[4954]: I1128 16:19:32.482141 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:20:02 crc kubenswrapper[4954]: I1128 16:20:02.480375 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:20:02 crc kubenswrapper[4954]: I1128 16:20:02.481221 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:20:32 crc kubenswrapper[4954]: I1128 16:20:32.480576 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:20:32 crc kubenswrapper[4954]: I1128 16:20:32.481322 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:20:32 crc kubenswrapper[4954]: I1128 16:20:32.481392 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:20:32 crc kubenswrapper[4954]: I1128 16:20:32.482290 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c0ae34b57f26b8a5507b044e5a8e89b38ca24fbf6f3880b3c5bf367c012726c1"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:20:32 crc kubenswrapper[4954]: I1128 16:20:32.482392 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://c0ae34b57f26b8a5507b044e5a8e89b38ca24fbf6f3880b3c5bf367c012726c1" gracePeriod=600 Nov 28 16:20:33 crc kubenswrapper[4954]: I1128 16:20:33.477443 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" 
containerID="c0ae34b57f26b8a5507b044e5a8e89b38ca24fbf6f3880b3c5bf367c012726c1" exitCode=0 Nov 28 16:20:33 crc kubenswrapper[4954]: I1128 16:20:33.477506 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"c0ae34b57f26b8a5507b044e5a8e89b38ca24fbf6f3880b3c5bf367c012726c1"} Nov 28 16:20:33 crc kubenswrapper[4954]: I1128 16:20:33.477837 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"10fedf83578e5a7a1a89aad8135351fe55542f6bf779f4911dd9e5f625bbbf3b"} Nov 28 16:20:33 crc kubenswrapper[4954]: I1128 16:20:33.477865 4954 scope.go:117] "RemoveContainer" containerID="cb24ea3233adacc195fae8733d377a677bc6d6ac461d682e127e50d4c1e27874" Nov 28 16:22:48 crc kubenswrapper[4954]: I1128 16:22:48.643746 4954 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 28 16:23:02 crc kubenswrapper[4954]: I1128 16:23:02.481132 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:23:02 crc kubenswrapper[4954]: I1128 16:23:02.481848 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:23:32 crc kubenswrapper[4954]: I1128 16:23:32.481140 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:23:32 crc kubenswrapper[4954]: I1128 16:23:32.482047 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.547824 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-wrv7t"] Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.549288 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovn-controller" containerID="cri-o://c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f" gracePeriod=30 Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.549345 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="nbdb" containerID="cri-o://2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f" gracePeriod=30 Nov 28 16:24:01 crc 
kubenswrapper[4954]: I1128 16:24:01.549423 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="sbdb" containerID="cri-o://6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134" gracePeriod=30 Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.549455 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovn-acl-logging" containerID="cri-o://3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb" gracePeriod=30 Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.549576 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="kube-rbac-proxy-node" containerID="cri-o://383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678" gracePeriod=30 Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.549784 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="northd" containerID="cri-o://7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974" gracePeriod=30 Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.549845 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa" gracePeriod=30 Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.589243 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovnkube-controller" containerID="cri-o://c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320" gracePeriod=30 Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.846921 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovnkube-controller/2.log" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.849150 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovn-acl-logging/0.log" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.850029 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovn-controller/0.log" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.850547 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.905901 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-9xqz9"] Nov 28 16:24:01 crc kubenswrapper[4954]: E1128 16:24:01.906132 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="kube-rbac-proxy-ovn-metrics" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906143 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="kube-rbac-proxy-ovn-metrics" Nov 28 16:24:01 crc kubenswrapper[4954]: E1128 16:24:01.906153 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovnkube-controller" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906159 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovnkube-controller" Nov 28 16:24:01 crc kubenswrapper[4954]: E1128 16:24:01.906168 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="northd" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906174 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="northd" Nov 28 16:24:01 crc kubenswrapper[4954]: E1128 16:24:01.906182 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovnkube-controller" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906189 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovnkube-controller" Nov 28 16:24:01 crc kubenswrapper[4954]: E1128 16:24:01.906196 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d42a5371-0448-48d1-8df8-aee818ea6644" containerName="registry" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906202 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d42a5371-0448-48d1-8df8-aee818ea6644" containerName="registry" Nov 28 16:24:01 crc kubenswrapper[4954]: E1128 16:24:01.906211 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovn-controller" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906216 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovn-controller" Nov 28 16:24:01 crc kubenswrapper[4954]: E1128 16:24:01.906226 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovnkube-controller" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906231 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovnkube-controller" Nov 28 16:24:01 crc kubenswrapper[4954]: E1128 16:24:01.906240 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovnkube-controller" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906245 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovnkube-controller" Nov 28 16:24:01 crc kubenswrapper[4954]: E1128 16:24:01.906252 4954 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="sbdb" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906258 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="sbdb" Nov 28 16:24:01 crc kubenswrapper[4954]: E1128 16:24:01.906265 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovn-acl-logging" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906271 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovn-acl-logging" Nov 28 16:24:01 crc kubenswrapper[4954]: E1128 16:24:01.906280 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="kubecfg-setup" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906286 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="kubecfg-setup" Nov 28 16:24:01 crc kubenswrapper[4954]: E1128 16:24:01.906292 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="nbdb" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906298 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="nbdb" Nov 28 16:24:01 crc kubenswrapper[4954]: E1128 16:24:01.906307 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="kube-rbac-proxy-node" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906312 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="kube-rbac-proxy-node" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906403 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovnkube-controller" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906412 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="nbdb" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906418 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="d42a5371-0448-48d1-8df8-aee818ea6644" containerName="registry" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906428 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovn-acl-logging" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906435 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="northd" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906442 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovnkube-controller" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906450 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovn-controller" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906458 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="kube-rbac-proxy-node" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906467 4954 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovnkube-controller" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906475 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="kube-rbac-proxy-ovn-metrics" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906480 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="sbdb" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.906700 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerName="ovnkube-controller" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.908219 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.969690 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-env-overrides\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970163 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-run-ovn-kubernetes\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970193 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-openvswitch\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970317 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-slash\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970265 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970315 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "run-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970381 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-slash" (OuterVolumeSpecName: "host-slash") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970414 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-systemd\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970454 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-var-lib-openvswitch\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970493 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d782w\" (UniqueName: \"kubernetes.io/projected/4fb02adc-75ef-4e63-841d-7fa817cc8da2-kube-api-access-d782w\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970515 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-etc-openvswitch\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970552 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-systemd-units\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970539 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970593 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovn-node-metrics-cert\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970621 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovnkube-script-lib\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970642 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-var-lib-cni-networks-ovn-kubernetes\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970668 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-kubelet\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970701 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-log-socket\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970731 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-ovn\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970757 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-cni-netd\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970776 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-node-log\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970796 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-run-netns\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970824 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovnkube-config\") pod 
\"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970848 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-cni-bin\") pod \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\" (UID: \"4fb02adc-75ef-4e63-841d-7fa817cc8da2\") " Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971061 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-slash\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971092 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-run-netns\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971113 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-log-socket\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971157 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntqxv\" (UniqueName: \"kubernetes.io/projected/48aaf821-4b58-437e-a6c1-38553cec1546-kube-api-access-ntqxv\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971196 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/48aaf821-4b58-437e-a6c1-38553cec1546-ovn-node-metrics-cert\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971236 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-run-ovn\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971258 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-node-log\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971279 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-cni-bin\") pod \"ovnkube-node-9xqz9\" (UID: 
\"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971308 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-run-ovn-kubernetes\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971349 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-etc-openvswitch\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971374 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/48aaf821-4b58-437e-a6c1-38553cec1546-ovnkube-script-lib\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971396 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-cni-netd\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971474 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/48aaf821-4b58-437e-a6c1-38553cec1546-env-overrides\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970564 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.970623 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971509 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971551 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971570 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971608 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-node-log" (OuterVolumeSpecName: "node-log") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971646 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971678 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971700 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971731 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-log-socket" (OuterVolumeSpecName: "log-socket") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971762 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). 
InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.971970 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972026 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972042 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-var-lib-openvswitch\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972071 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972183 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-kubelet\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972226 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/48aaf821-4b58-437e-a6c1-38553cec1546-ovnkube-config\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972303 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-run-systemd\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972356 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-systemd-units\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972420 4954 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-run-openvswitch\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972647 4954 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972696 4954 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972722 4954 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972735 4954 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972747 4954 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-slash\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972757 4954 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972766 4954 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972774 4954 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972783 4954 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972795 4954 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972805 4954 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972815 4954 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-log-socket\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972825 4954 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972836 4954 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972845 4954 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-node-log\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.972855 4954 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.976679 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fb02adc-75ef-4e63-841d-7fa817cc8da2-kube-api-access-d782w" (OuterVolumeSpecName: "kube-api-access-d782w") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "kube-api-access-d782w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.977662 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:24:01 crc kubenswrapper[4954]: I1128 16:24:01.983678 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "4fb02adc-75ef-4e63-841d-7fa817cc8da2" (UID: "4fb02adc-75ef-4e63-841d-7fa817cc8da2"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074309 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntqxv\" (UniqueName: \"kubernetes.io/projected/48aaf821-4b58-437e-a6c1-38553cec1546-kube-api-access-ntqxv\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074376 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/48aaf821-4b58-437e-a6c1-38553cec1546-ovn-node-metrics-cert\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074423 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-run-ovn\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074476 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-node-log\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074501 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-cni-bin\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074581 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-run-ovn-kubernetes\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074608 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-etc-openvswitch\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074633 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/48aaf821-4b58-437e-a6c1-38553cec1546-ovnkube-script-lib\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074651 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-cni-netd\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc 
kubenswrapper[4954]: I1128 16:24:02.074672 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/48aaf821-4b58-437e-a6c1-38553cec1546-env-overrides\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074698 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-kubelet\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074719 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-var-lib-openvswitch\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074742 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074764 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/48aaf821-4b58-437e-a6c1-38553cec1546-ovnkube-config\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074786 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-run-systemd\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074813 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-systemd-units\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074804 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-run-ovn-kubernetes\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074867 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-run-openvswitch\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074836 
4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-run-openvswitch\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074901 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-kubelet\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074932 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-var-lib-openvswitch\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074946 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-slash\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075008 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-run-netns\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075059 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-log-socket\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075180 4954 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075220 4954 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4fb02adc-75ef-4e63-841d-7fa817cc8da2-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075255 4954 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4fb02adc-75ef-4e63-841d-7fa817cc8da2-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075285 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d782w\" (UniqueName: \"kubernetes.io/projected/4fb02adc-75ef-4e63-841d-7fa817cc8da2-kube-api-access-d782w\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075349 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-cni-bin\") 
pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075410 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-slash\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075432 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-run-systemd\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.074960 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075440 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-systemd-units\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075478 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-run-ovn\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075360 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-log-socket\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075474 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-cni-netd\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075505 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/48aaf821-4b58-437e-a6c1-38553cec1546-env-overrides\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075565 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-node-log\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: 
I1128 16:24:02.075578 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-etc-openvswitch\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075586 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/48aaf821-4b58-437e-a6c1-38553cec1546-host-run-netns\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.075868 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/48aaf821-4b58-437e-a6c1-38553cec1546-ovnkube-config\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.076092 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/48aaf821-4b58-437e-a6c1-38553cec1546-ovnkube-script-lib\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.079274 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/48aaf821-4b58-437e-a6c1-38553cec1546-ovn-node-metrics-cert\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.094015 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntqxv\" (UniqueName: \"kubernetes.io/projected/48aaf821-4b58-437e-a6c1-38553cec1546-kube-api-access-ntqxv\") pod \"ovnkube-node-9xqz9\" (UID: \"48aaf821-4b58-437e-a6c1-38553cec1546\") " pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.230112 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.252676 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-87jtn_d5ee5420-ed17-4059-8d54-3b486c2ffd1d/kube-multus/1.log" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.253117 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-87jtn_d5ee5420-ed17-4059-8d54-3b486c2ffd1d/kube-multus/0.log" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.253164 4954 generic.go:334] "Generic (PLEG): container finished" podID="d5ee5420-ed17-4059-8d54-3b486c2ffd1d" containerID="47d5b77cc83f542563384a97dff022d8292541a330da7f0f775e7cbcb6ef72dc" exitCode=2 Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.253221 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-87jtn" event={"ID":"d5ee5420-ed17-4059-8d54-3b486c2ffd1d","Type":"ContainerDied","Data":"47d5b77cc83f542563384a97dff022d8292541a330da7f0f775e7cbcb6ef72dc"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.253335 4954 scope.go:117] "RemoveContainer" containerID="2fd640776bc90cab48525ec7df60b65c4615c7e9443cbe52a9ec50fde7a9590c" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.254011 4954 scope.go:117] "RemoveContainer" containerID="47d5b77cc83f542563384a97dff022d8292541a330da7f0f775e7cbcb6ef72dc" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.255775 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovnkube-controller/2.log" Nov 28 16:24:02 crc kubenswrapper[4954]: W1128 16:24:02.257331 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48aaf821_4b58_437e_a6c1_38553cec1546.slice/crio-6fae0d7e1eeac8dd61b3bca059270d5091d600a528466d210082270c59a6e232 WatchSource:0}: Error finding container 6fae0d7e1eeac8dd61b3bca059270d5091d600a528466d210082270c59a6e232: Status 404 returned error can't find the container with id 6fae0d7e1eeac8dd61b3bca059270d5091d600a528466d210082270c59a6e232 Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.260896 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovn-acl-logging/0.log" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.263156 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wrv7t_4fb02adc-75ef-4e63-841d-7fa817cc8da2/ovn-controller/0.log" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.263763 4954 generic.go:334] "Generic (PLEG): container finished" podID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerID="c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320" exitCode=0 Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.263831 4954 generic.go:334] "Generic (PLEG): container finished" podID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerID="6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134" exitCode=0 Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.263841 4954 generic.go:334] "Generic (PLEG): container finished" podID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerID="2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f" exitCode=0 Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.263850 4954 generic.go:334] "Generic (PLEG): container finished" 
podID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerID="7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974" exitCode=0 Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.263862 4954 generic.go:334] "Generic (PLEG): container finished" podID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerID="de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa" exitCode=0 Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.263874 4954 generic.go:334] "Generic (PLEG): container finished" podID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerID="383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678" exitCode=0 Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.263885 4954 generic.go:334] "Generic (PLEG): container finished" podID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerID="3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb" exitCode=143 Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.263896 4954 generic.go:334] "Generic (PLEG): container finished" podID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" containerID="c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f" exitCode=143 Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.263959 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerDied","Data":"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264004 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerDied","Data":"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264024 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerDied","Data":"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264040 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerDied","Data":"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264056 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerDied","Data":"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264066 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264072 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerDied","Data":"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264335 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264352 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264359 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264365 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264378 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264385 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264392 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264398 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264404 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264410 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264428 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerDied","Data":"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264445 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264454 4954 pod_container_deletor.go:114] "Failed 
to issue the request to remove container" containerID={"Type":"cri-o","ID":"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264460 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264467 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264473 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264480 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264487 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264493 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264499 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264506 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264518 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerDied","Data":"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264544 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264553 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264559 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264566 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264572 4954 pod_container_deletor.go:114] "Failed 
to issue the request to remove container" containerID={"Type":"cri-o","ID":"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264580 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264586 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264592 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264599 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264605 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264615 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wrv7t" event={"ID":"4fb02adc-75ef-4e63-841d-7fa817cc8da2","Type":"ContainerDied","Data":"d61f44f1b0425bbd05df40555cb7051e775d884b06af8a9817d43b9fa6c9d249"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264626 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264633 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264639 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264646 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264652 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264658 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264664 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264670 4954 pod_container_deletor.go:114] "Failed 
to issue the request to remove container" containerID={"Type":"cri-o","ID":"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264677 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.264683 4954 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043"} Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.299180 4954 scope.go:117] "RemoveContainer" containerID="c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.335324 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-wrv7t"] Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.338431 4954 scope.go:117] "RemoveContainer" containerID="8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.343817 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-wrv7t"] Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.367207 4954 scope.go:117] "RemoveContainer" containerID="6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.380644 4954 scope.go:117] "RemoveContainer" containerID="2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.396288 4954 scope.go:117] "RemoveContainer" containerID="7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.411290 4954 scope.go:117] "RemoveContainer" containerID="de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.432292 4954 scope.go:117] "RemoveContainer" containerID="383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.480696 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.480773 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.480824 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.481359 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"10fedf83578e5a7a1a89aad8135351fe55542f6bf779f4911dd9e5f625bbbf3b"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" 
containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.481434 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://10fedf83578e5a7a1a89aad8135351fe55542f6bf779f4911dd9e5f625bbbf3b" gracePeriod=600 Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.507383 4954 scope.go:117] "RemoveContainer" containerID="3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.529849 4954 scope.go:117] "RemoveContainer" containerID="c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.557023 4954 scope.go:117] "RemoveContainer" containerID="15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.573734 4954 scope.go:117] "RemoveContainer" containerID="c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320" Nov 28 16:24:02 crc kubenswrapper[4954]: E1128 16:24:02.574294 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320\": container with ID starting with c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320 not found: ID does not exist" containerID="c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.574330 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320"} err="failed to get container status \"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320\": rpc error: code = NotFound desc = could not find container \"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320\": container with ID starting with c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.574376 4954 scope.go:117] "RemoveContainer" containerID="8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8" Nov 28 16:24:02 crc kubenswrapper[4954]: E1128 16:24:02.574919 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\": container with ID starting with 8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8 not found: ID does not exist" containerID="8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.574976 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8"} err="failed to get container status \"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\": rpc error: code = NotFound desc = could not find container \"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\": container with ID starting with 8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 
16:24:02.574992 4954 scope.go:117] "RemoveContainer" containerID="6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134" Nov 28 16:24:02 crc kubenswrapper[4954]: E1128 16:24:02.575327 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\": container with ID starting with 6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134 not found: ID does not exist" containerID="6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.575411 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134"} err="failed to get container status \"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\": rpc error: code = NotFound desc = could not find container \"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\": container with ID starting with 6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.575447 4954 scope.go:117] "RemoveContainer" containerID="2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f" Nov 28 16:24:02 crc kubenswrapper[4954]: E1128 16:24:02.575948 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\": container with ID starting with 2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f not found: ID does not exist" containerID="2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.575987 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f"} err="failed to get container status \"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\": rpc error: code = NotFound desc = could not find container \"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\": container with ID starting with 2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.576023 4954 scope.go:117] "RemoveContainer" containerID="7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974" Nov 28 16:24:02 crc kubenswrapper[4954]: E1128 16:24:02.576278 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\": container with ID starting with 7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974 not found: ID does not exist" containerID="7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.576326 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974"} err="failed to get container status \"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\": rpc error: code = NotFound desc = could not find container \"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\": container with ID 
starting with 7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.576361 4954 scope.go:117] "RemoveContainer" containerID="de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa" Nov 28 16:24:02 crc kubenswrapper[4954]: E1128 16:24:02.576748 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\": container with ID starting with de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa not found: ID does not exist" containerID="de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.576800 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa"} err="failed to get container status \"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\": rpc error: code = NotFound desc = could not find container \"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\": container with ID starting with de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.576819 4954 scope.go:117] "RemoveContainer" containerID="383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678" Nov 28 16:24:02 crc kubenswrapper[4954]: E1128 16:24:02.577409 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\": container with ID starting with 383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678 not found: ID does not exist" containerID="383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.577468 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678"} err="failed to get container status \"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\": rpc error: code = NotFound desc = could not find container \"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\": container with ID starting with 383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.577482 4954 scope.go:117] "RemoveContainer" containerID="3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb" Nov 28 16:24:02 crc kubenswrapper[4954]: E1128 16:24:02.577893 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\": container with ID starting with 3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb not found: ID does not exist" containerID="3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.577956 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb"} err="failed to get container status 
\"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\": rpc error: code = NotFound desc = could not find container \"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\": container with ID starting with 3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.577983 4954 scope.go:117] "RemoveContainer" containerID="c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f" Nov 28 16:24:02 crc kubenswrapper[4954]: E1128 16:24:02.578366 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\": container with ID starting with c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f not found: ID does not exist" containerID="c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.578414 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f"} err="failed to get container status \"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\": rpc error: code = NotFound desc = could not find container \"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\": container with ID starting with c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.578431 4954 scope.go:117] "RemoveContainer" containerID="15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043" Nov 28 16:24:02 crc kubenswrapper[4954]: E1128 16:24:02.578869 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\": container with ID starting with 15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043 not found: ID does not exist" containerID="15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.578925 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043"} err="failed to get container status \"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\": rpc error: code = NotFound desc = could not find container \"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\": container with ID starting with 15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.578944 4954 scope.go:117] "RemoveContainer" containerID="c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.579243 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320"} err="failed to get container status \"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320\": rpc error: code = NotFound desc = could not find container \"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320\": container with ID starting with c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320 not found: 
ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.579278 4954 scope.go:117] "RemoveContainer" containerID="8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.579655 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8"} err="failed to get container status \"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\": rpc error: code = NotFound desc = could not find container \"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\": container with ID starting with 8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.579682 4954 scope.go:117] "RemoveContainer" containerID="6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.580200 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134"} err="failed to get container status \"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\": rpc error: code = NotFound desc = could not find container \"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\": container with ID starting with 6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.580229 4954 scope.go:117] "RemoveContainer" containerID="2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.580578 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f"} err="failed to get container status \"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\": rpc error: code = NotFound desc = could not find container \"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\": container with ID starting with 2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.580621 4954 scope.go:117] "RemoveContainer" containerID="7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.581087 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974"} err="failed to get container status \"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\": rpc error: code = NotFound desc = could not find container \"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\": container with ID starting with 7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.581117 4954 scope.go:117] "RemoveContainer" containerID="de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.582199 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa"} err="failed to get container status 
\"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\": rpc error: code = NotFound desc = could not find container \"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\": container with ID starting with de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.582259 4954 scope.go:117] "RemoveContainer" containerID="383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.582850 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678"} err="failed to get container status \"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\": rpc error: code = NotFound desc = could not find container \"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\": container with ID starting with 383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.582928 4954 scope.go:117] "RemoveContainer" containerID="3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.583346 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb"} err="failed to get container status \"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\": rpc error: code = NotFound desc = could not find container \"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\": container with ID starting with 3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.583426 4954 scope.go:117] "RemoveContainer" containerID="c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.583800 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f"} err="failed to get container status \"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\": rpc error: code = NotFound desc = could not find container \"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\": container with ID starting with c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.583840 4954 scope.go:117] "RemoveContainer" containerID="15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.584096 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043"} err="failed to get container status \"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\": rpc error: code = NotFound desc = could not find container \"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\": container with ID starting with 15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.584118 4954 scope.go:117] "RemoveContainer" 
containerID="c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.584456 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320"} err="failed to get container status \"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320\": rpc error: code = NotFound desc = could not find container \"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320\": container with ID starting with c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.584480 4954 scope.go:117] "RemoveContainer" containerID="8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.584859 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8"} err="failed to get container status \"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\": rpc error: code = NotFound desc = could not find container \"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\": container with ID starting with 8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.584902 4954 scope.go:117] "RemoveContainer" containerID="6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.585176 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134"} err="failed to get container status \"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\": rpc error: code = NotFound desc = could not find container \"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\": container with ID starting with 6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.585200 4954 scope.go:117] "RemoveContainer" containerID="2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.585475 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f"} err="failed to get container status \"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\": rpc error: code = NotFound desc = could not find container \"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\": container with ID starting with 2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.585496 4954 scope.go:117] "RemoveContainer" containerID="7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.585821 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974"} err="failed to get container status \"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\": rpc error: code = NotFound desc = could not find 
container \"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\": container with ID starting with 7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.585860 4954 scope.go:117] "RemoveContainer" containerID="de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.586171 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa"} err="failed to get container status \"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\": rpc error: code = NotFound desc = could not find container \"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\": container with ID starting with de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.586192 4954 scope.go:117] "RemoveContainer" containerID="383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.586556 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678"} err="failed to get container status \"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\": rpc error: code = NotFound desc = could not find container \"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\": container with ID starting with 383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.586584 4954 scope.go:117] "RemoveContainer" containerID="3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.586870 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb"} err="failed to get container status \"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\": rpc error: code = NotFound desc = could not find container \"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\": container with ID starting with 3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.586889 4954 scope.go:117] "RemoveContainer" containerID="c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.587251 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f"} err="failed to get container status \"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\": rpc error: code = NotFound desc = could not find container \"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\": container with ID starting with c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.587291 4954 scope.go:117] "RemoveContainer" containerID="15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.587591 4954 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043"} err="failed to get container status \"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\": rpc error: code = NotFound desc = could not find container \"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\": container with ID starting with 15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.587629 4954 scope.go:117] "RemoveContainer" containerID="c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.587883 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320"} err="failed to get container status \"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320\": rpc error: code = NotFound desc = could not find container \"c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320\": container with ID starting with c589901f3519807d5357f0e5de1f3ee21b95e26f3879cba5dac94b774ad34320 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.587921 4954 scope.go:117] "RemoveContainer" containerID="8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.588112 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8"} err="failed to get container status \"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\": rpc error: code = NotFound desc = could not find container \"8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8\": container with ID starting with 8f179a2e992a576b91cba6a657f9cdbedf9a89603a9b3994f9877ee7b30dcab8 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.588135 4954 scope.go:117] "RemoveContainer" containerID="6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.588420 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134"} err="failed to get container status \"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\": rpc error: code = NotFound desc = could not find container \"6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134\": container with ID starting with 6d7d172e4c7182ee197c710eb6a30add3a3ea5d3b38445e2dcb9a63119bac134 not found: ID does not exist" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.588450 4954 scope.go:117] "RemoveContainer" containerID="2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f" Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.588708 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f"} err="failed to get container status \"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\": rpc error: code = NotFound desc = could not find container \"2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f\": container with ID starting with 
2231f9e65d050d268159429d76c49114eee3b6124643213fd70a3550a7c1456f not found: ID does not exist"
Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.588754 4954 scope.go:117] "RemoveContainer" containerID="7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974"
Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.588930 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974"} err="failed to get container status \"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\": rpc error: code = NotFound desc = could not find container \"7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974\": container with ID starting with 7a2a0a8b70e716a83d1c115a497782917f74fe63ae44aaa850b1591efc030974 not found: ID does not exist"
Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.588949 4954 scope.go:117] "RemoveContainer" containerID="de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa"
Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.589136 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa"} err="failed to get container status \"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\": rpc error: code = NotFound desc = could not find container \"de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa\": container with ID starting with de22bd0075ff8f0caaca906ea6bdb8c68383317951dbb8fb12e01a8c3c5f4cfa not found: ID does not exist"
Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.589158 4954 scope.go:117] "RemoveContainer" containerID="383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678"
Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.589363 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678"} err="failed to get container status \"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\": rpc error: code = NotFound desc = could not find container \"383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678\": container with ID starting with 383f20f6fab713f16e75acb9c1d0acc646904c82f3551a52cff2f52f72a76678 not found: ID does not exist"
Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.589383 4954 scope.go:117] "RemoveContainer" containerID="3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb"
Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.589594 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb"} err="failed to get container status \"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\": rpc error: code = NotFound desc = could not find container \"3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb\": container with ID starting with 3660cc66226dfb47ceefdc3e95e500bbf70cf605ba83e540979838a550a9dffb not found: ID does not exist"
Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.589612 4954 scope.go:117] "RemoveContainer" containerID="c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f"
Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.589831 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f"} err="failed to get container status \"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\": rpc error: code = NotFound desc = could not find container \"c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f\": container with ID starting with c76d54836015e616c6d9cd34d384012b6eb95ee3f88086c1849cf8523b22415f not found: ID does not exist"
Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.589849 4954 scope.go:117] "RemoveContainer" containerID="15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043"
Nov 28 16:24:02 crc kubenswrapper[4954]: I1128 16:24:02.590057 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043"} err="failed to get container status \"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\": rpc error: code = NotFound desc = could not find container \"15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043\": container with ID starting with 15d8fa9dfc19fc6ac63172739c016f743e10f471a72cbcb830db89927fa79043 not found: ID does not exist"
Nov 28 16:24:03 crc kubenswrapper[4954]: I1128 16:24:03.273432 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-87jtn_d5ee5420-ed17-4059-8d54-3b486c2ffd1d/kube-multus/1.log"
Nov 28 16:24:03 crc kubenswrapper[4954]: I1128 16:24:03.274128 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-87jtn" event={"ID":"d5ee5420-ed17-4059-8d54-3b486c2ffd1d","Type":"ContainerStarted","Data":"a2d1cab6566a021b696061e0be382752784fdbee78dff6cf076cd4fc82e99cd3"}
Nov 28 16:24:03 crc kubenswrapper[4954]: I1128 16:24:03.279871 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="10fedf83578e5a7a1a89aad8135351fe55542f6bf779f4911dd9e5f625bbbf3b" exitCode=0
Nov 28 16:24:03 crc kubenswrapper[4954]: I1128 16:24:03.279915 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"10fedf83578e5a7a1a89aad8135351fe55542f6bf779f4911dd9e5f625bbbf3b"}
Nov 28 16:24:03 crc kubenswrapper[4954]: I1128 16:24:03.279971 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"81405dd859c5dce906875516afa2e66fb81aa5bf95b3268c18658c9aa3d313f9"}
Nov 28 16:24:03 crc kubenswrapper[4954]: I1128 16:24:03.280000 4954 scope.go:117] "RemoveContainer" containerID="c0ae34b57f26b8a5507b044e5a8e89b38ca24fbf6f3880b3c5bf367c012726c1"
Nov 28 16:24:03 crc kubenswrapper[4954]: I1128 16:24:03.282205 4954 generic.go:334] "Generic (PLEG): container finished" podID="48aaf821-4b58-437e-a6c1-38553cec1546" containerID="4be8edcc99d9e52b7bac978063a5eb9d9796c34670b3bc0bb96604939fc7a99d" exitCode=0
Nov 28 16:24:03 crc kubenswrapper[4954]: I1128 16:24:03.282256 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" event={"ID":"48aaf821-4b58-437e-a6c1-38553cec1546","Type":"ContainerDied","Data":"4be8edcc99d9e52b7bac978063a5eb9d9796c34670b3bc0bb96604939fc7a99d"}
Nov 28 16:24:03 crc kubenswrapper[4954]: I1128 16:24:03.282296 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" event={"ID":"48aaf821-4b58-437e-a6c1-38553cec1546","Type":"ContainerStarted","Data":"6fae0d7e1eeac8dd61b3bca059270d5091d600a528466d210082270c59a6e232"}
Nov 28 16:24:03 crc kubenswrapper[4954]: I1128 16:24:03.873161 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fb02adc-75ef-4e63-841d-7fa817cc8da2" path="/var/lib/kubelet/pods/4fb02adc-75ef-4e63-841d-7fa817cc8da2/volumes"
Nov 28 16:24:04 crc kubenswrapper[4954]: I1128 16:24:04.301364 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" event={"ID":"48aaf821-4b58-437e-a6c1-38553cec1546","Type":"ContainerStarted","Data":"97cd5711be57d2698cc5e55e7455e9f98146b7567e55dcbe55ca86607bfb1bdb"}
Nov 28 16:24:04 crc kubenswrapper[4954]: I1128 16:24:04.301950 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" event={"ID":"48aaf821-4b58-437e-a6c1-38553cec1546","Type":"ContainerStarted","Data":"9e90fb8e72acdd07e17879447d7fb613a558e2b49c8d0d8db37976acec8c8673"}
Nov 28 16:24:04 crc kubenswrapper[4954]: I1128 16:24:04.301985 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" event={"ID":"48aaf821-4b58-437e-a6c1-38553cec1546","Type":"ContainerStarted","Data":"e5722cd5498012b4dbc888a0bfc2ffd55636c4ba0e1806fc34ad7947cf905f1f"}
Nov 28 16:24:04 crc kubenswrapper[4954]: I1128 16:24:04.301997 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" event={"ID":"48aaf821-4b58-437e-a6c1-38553cec1546","Type":"ContainerStarted","Data":"a54a3cba48e74a2b4953fae10cb922912faf8675fc181835e66740299be6cb71"}
Nov 28 16:24:04 crc kubenswrapper[4954]: I1128 16:24:04.302008 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" event={"ID":"48aaf821-4b58-437e-a6c1-38553cec1546","Type":"ContainerStarted","Data":"786bdd6952273ca553106f0a2c2f647af4962db964b9fa79fe43ac30a0e964a1"}
Nov 28 16:24:04 crc kubenswrapper[4954]: I1128 16:24:04.302019 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" event={"ID":"48aaf821-4b58-437e-a6c1-38553cec1546","Type":"ContainerStarted","Data":"099fbc60bb66312ff8e02c21b3b1e4939217ebe2362a168329c96daa2295b65c"}
Nov 28 16:24:06 crc kubenswrapper[4954]: I1128 16:24:06.317757 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" event={"ID":"48aaf821-4b58-437e-a6c1-38553cec1546","Type":"ContainerStarted","Data":"f92cee5ad85ffee5e8c1957ff89a77d53515016cd48a60e3428fa19353c2d6d8"}
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.133571 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-whdm2"]
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.134792 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.136993 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage"
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.137698 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt"
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.137821 4954 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-9n7l8"
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.138136 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt"
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.262100 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/5b432f8e-16f5-44e4-97e8-eda21636fa41-node-mnt\") pod \"crc-storage-crc-whdm2\" (UID: \"5b432f8e-16f5-44e4-97e8-eda21636fa41\") " pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.262211 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/5b432f8e-16f5-44e4-97e8-eda21636fa41-crc-storage\") pod \"crc-storage-crc-whdm2\" (UID: \"5b432f8e-16f5-44e4-97e8-eda21636fa41\") " pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.262258 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8j65f\" (UniqueName: \"kubernetes.io/projected/5b432f8e-16f5-44e4-97e8-eda21636fa41-kube-api-access-8j65f\") pod \"crc-storage-crc-whdm2\" (UID: \"5b432f8e-16f5-44e4-97e8-eda21636fa41\") " pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.363595 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8j65f\" (UniqueName: \"kubernetes.io/projected/5b432f8e-16f5-44e4-97e8-eda21636fa41-kube-api-access-8j65f\") pod \"crc-storage-crc-whdm2\" (UID: \"5b432f8e-16f5-44e4-97e8-eda21636fa41\") " pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.363692 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/5b432f8e-16f5-44e4-97e8-eda21636fa41-node-mnt\") pod \"crc-storage-crc-whdm2\" (UID: \"5b432f8e-16f5-44e4-97e8-eda21636fa41\") " pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.363756 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/5b432f8e-16f5-44e4-97e8-eda21636fa41-crc-storage\") pod \"crc-storage-crc-whdm2\" (UID: \"5b432f8e-16f5-44e4-97e8-eda21636fa41\") " pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.364466 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/5b432f8e-16f5-44e4-97e8-eda21636fa41-crc-storage\") pod \"crc-storage-crc-whdm2\" (UID: \"5b432f8e-16f5-44e4-97e8-eda21636fa41\") " pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.365165 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/5b432f8e-16f5-44e4-97e8-eda21636fa41-node-mnt\") pod \"crc-storage-crc-whdm2\" (UID: \"5b432f8e-16f5-44e4-97e8-eda21636fa41\") " pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.381874 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8j65f\" (UniqueName: \"kubernetes.io/projected/5b432f8e-16f5-44e4-97e8-eda21636fa41-kube-api-access-8j65f\") pod \"crc-storage-crc-whdm2\" (UID: \"5b432f8e-16f5-44e4-97e8-eda21636fa41\") " pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:08 crc kubenswrapper[4954]: I1128 16:24:08.456111 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:08 crc kubenswrapper[4954]: E1128 16:24:08.474778 4954 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-whdm2_crc-storage_5b432f8e-16f5-44e4-97e8-eda21636fa41_0(f4954d142828515161c52b95f4d614dd500f6300af70191a1a469b3b6f23977a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 28 16:24:08 crc kubenswrapper[4954]: E1128 16:24:08.474884 4954 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-whdm2_crc-storage_5b432f8e-16f5-44e4-97e8-eda21636fa41_0(f4954d142828515161c52b95f4d614dd500f6300af70191a1a469b3b6f23977a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:08 crc kubenswrapper[4954]: E1128 16:24:08.474925 4954 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-whdm2_crc-storage_5b432f8e-16f5-44e4-97e8-eda21636fa41_0(f4954d142828515161c52b95f4d614dd500f6300af70191a1a469b3b6f23977a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:08 crc kubenswrapper[4954]: E1128 16:24:08.475014 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-whdm2_crc-storage(5b432f8e-16f5-44e4-97e8-eda21636fa41)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-whdm2_crc-storage(5b432f8e-16f5-44e4-97e8-eda21636fa41)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-whdm2_crc-storage_5b432f8e-16f5-44e4-97e8-eda21636fa41_0(f4954d142828515161c52b95f4d614dd500f6300af70191a1a469b3b6f23977a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-whdm2" podUID="5b432f8e-16f5-44e4-97e8-eda21636fa41"
Nov 28 16:24:10 crc kubenswrapper[4954]: I1128 16:24:10.344054 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" event={"ID":"48aaf821-4b58-437e-a6c1-38553cec1546","Type":"ContainerStarted","Data":"ce01c945bd4d6b4be864c8efd3f80a3f8c67fa5fc9c4a1f7cf93a362cf8384da"}
Nov 28 16:24:10 crc kubenswrapper[4954]: I1128 16:24:10.345402 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9"
Nov 28 16:24:10 crc kubenswrapper[4954]: I1128 16:24:10.345487 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9"
Nov 28 16:24:10 crc kubenswrapper[4954]: I1128 16:24:10.370957 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9"
Nov 28 16:24:10 crc kubenswrapper[4954]: I1128 16:24:10.379132 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" podStartSLOduration=9.379113726 podStartE2EDuration="9.379113726s" podCreationTimestamp="2025-11-28 16:24:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:24:10.372735046 +0000 UTC m=+803.764403587" watchObservedRunningTime="2025-11-28 16:24:10.379113726 +0000 UTC m=+803.770782267"
Nov 28 16:24:10 crc kubenswrapper[4954]: I1128 16:24:10.651008 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-whdm2"]
Nov 28 16:24:10 crc kubenswrapper[4954]: I1128 16:24:10.651149 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:10 crc kubenswrapper[4954]: I1128 16:24:10.651768 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:10 crc kubenswrapper[4954]: E1128 16:24:10.681685 4954 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-whdm2_crc-storage_5b432f8e-16f5-44e4-97e8-eda21636fa41_0(824594b921cf30854417a526eaf41d4cb67a7c7ebdeb1a7a878af5d43fabe164): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 28 16:24:10 crc kubenswrapper[4954]: E1128 16:24:10.681757 4954 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-whdm2_crc-storage_5b432f8e-16f5-44e4-97e8-eda21636fa41_0(824594b921cf30854417a526eaf41d4cb67a7c7ebdeb1a7a878af5d43fabe164): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-whdm2"
Nov 28 16:24:10 crc kubenswrapper[4954]: E1128 16:24:10.681780 4954 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-whdm2_crc-storage_5b432f8e-16f5-44e4-97e8-eda21636fa41_0(824594b921cf30854417a526eaf41d4cb67a7c7ebdeb1a7a878af5d43fabe164): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="crc-storage/crc-storage-crc-whdm2" Nov 28 16:24:10 crc kubenswrapper[4954]: E1128 16:24:10.681831 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-whdm2_crc-storage(5b432f8e-16f5-44e4-97e8-eda21636fa41)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-whdm2_crc-storage(5b432f8e-16f5-44e4-97e8-eda21636fa41)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-whdm2_crc-storage_5b432f8e-16f5-44e4-97e8-eda21636fa41_0(824594b921cf30854417a526eaf41d4cb67a7c7ebdeb1a7a878af5d43fabe164): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-whdm2" podUID="5b432f8e-16f5-44e4-97e8-eda21636fa41" Nov 28 16:24:11 crc kubenswrapper[4954]: I1128 16:24:11.348641 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:11 crc kubenswrapper[4954]: I1128 16:24:11.379455 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:21 crc kubenswrapper[4954]: I1128 16:24:21.855557 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-whdm2" Nov 28 16:24:21 crc kubenswrapper[4954]: I1128 16:24:21.856887 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-whdm2" Nov 28 16:24:22 crc kubenswrapper[4954]: I1128 16:24:22.060366 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-whdm2"] Nov 28 16:24:22 crc kubenswrapper[4954]: W1128 16:24:22.062947 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b432f8e_16f5_44e4_97e8_eda21636fa41.slice/crio-49903e78d36d3d18f06684dcfc297f660648b612e3cfb491d809acbe2cdf0d69 WatchSource:0}: Error finding container 49903e78d36d3d18f06684dcfc297f660648b612e3cfb491d809acbe2cdf0d69: Status 404 returned error can't find the container with id 49903e78d36d3d18f06684dcfc297f660648b612e3cfb491d809acbe2cdf0d69 Nov 28 16:24:22 crc kubenswrapper[4954]: I1128 16:24:22.065173 4954 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:24:22 crc kubenswrapper[4954]: I1128 16:24:22.434193 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-whdm2" event={"ID":"5b432f8e-16f5-44e4-97e8-eda21636fa41","Type":"ContainerStarted","Data":"49903e78d36d3d18f06684dcfc297f660648b612e3cfb491d809acbe2cdf0d69"} Nov 28 16:24:24 crc kubenswrapper[4954]: I1128 16:24:24.447419 4954 generic.go:334] "Generic (PLEG): container finished" podID="5b432f8e-16f5-44e4-97e8-eda21636fa41" containerID="f7819e92a94269bbfc9d1565d434c2df0f8fc9a4795e5192e86603047be5ae09" exitCode=0 Nov 28 16:24:24 crc kubenswrapper[4954]: I1128 16:24:24.447553 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-whdm2" event={"ID":"5b432f8e-16f5-44e4-97e8-eda21636fa41","Type":"ContainerDied","Data":"f7819e92a94269bbfc9d1565d434c2df0f8fc9a4795e5192e86603047be5ae09"} Nov 28 16:24:25 crc kubenswrapper[4954]: I1128 16:24:25.708964 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-whdm2" Nov 28 16:24:25 crc kubenswrapper[4954]: I1128 16:24:25.808555 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/5b432f8e-16f5-44e4-97e8-eda21636fa41-crc-storage\") pod \"5b432f8e-16f5-44e4-97e8-eda21636fa41\" (UID: \"5b432f8e-16f5-44e4-97e8-eda21636fa41\") " Nov 28 16:24:25 crc kubenswrapper[4954]: I1128 16:24:25.808967 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/5b432f8e-16f5-44e4-97e8-eda21636fa41-node-mnt\") pod \"5b432f8e-16f5-44e4-97e8-eda21636fa41\" (UID: \"5b432f8e-16f5-44e4-97e8-eda21636fa41\") " Nov 28 16:24:25 crc kubenswrapper[4954]: I1128 16:24:25.809065 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8j65f\" (UniqueName: \"kubernetes.io/projected/5b432f8e-16f5-44e4-97e8-eda21636fa41-kube-api-access-8j65f\") pod \"5b432f8e-16f5-44e4-97e8-eda21636fa41\" (UID: \"5b432f8e-16f5-44e4-97e8-eda21636fa41\") " Nov 28 16:24:25 crc kubenswrapper[4954]: I1128 16:24:25.809114 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5b432f8e-16f5-44e4-97e8-eda21636fa41-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "5b432f8e-16f5-44e4-97e8-eda21636fa41" (UID: "5b432f8e-16f5-44e4-97e8-eda21636fa41"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:24:25 crc kubenswrapper[4954]: I1128 16:24:25.809332 4954 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/5b432f8e-16f5-44e4-97e8-eda21636fa41-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:25 crc kubenswrapper[4954]: I1128 16:24:25.814720 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b432f8e-16f5-44e4-97e8-eda21636fa41-kube-api-access-8j65f" (OuterVolumeSpecName: "kube-api-access-8j65f") pod "5b432f8e-16f5-44e4-97e8-eda21636fa41" (UID: "5b432f8e-16f5-44e4-97e8-eda21636fa41"). InnerVolumeSpecName "kube-api-access-8j65f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:24:25 crc kubenswrapper[4954]: I1128 16:24:25.828538 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b432f8e-16f5-44e4-97e8-eda21636fa41-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "5b432f8e-16f5-44e4-97e8-eda21636fa41" (UID: "5b432f8e-16f5-44e4-97e8-eda21636fa41"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:24:25 crc kubenswrapper[4954]: I1128 16:24:25.910961 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8j65f\" (UniqueName: \"kubernetes.io/projected/5b432f8e-16f5-44e4-97e8-eda21636fa41-kube-api-access-8j65f\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:25 crc kubenswrapper[4954]: I1128 16:24:25.910990 4954 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/5b432f8e-16f5-44e4-97e8-eda21636fa41-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:26 crc kubenswrapper[4954]: I1128 16:24:26.463733 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-whdm2" event={"ID":"5b432f8e-16f5-44e4-97e8-eda21636fa41","Type":"ContainerDied","Data":"49903e78d36d3d18f06684dcfc297f660648b612e3cfb491d809acbe2cdf0d69"} Nov 28 16:24:26 crc kubenswrapper[4954]: I1128 16:24:26.463808 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49903e78d36d3d18f06684dcfc297f660648b612e3cfb491d809acbe2cdf0d69" Nov 28 16:24:26 crc kubenswrapper[4954]: I1128 16:24:26.463875 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-whdm2" Nov 28 16:24:32 crc kubenswrapper[4954]: I1128 16:24:32.257931 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-9xqz9" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.132404 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5"] Nov 28 16:24:33 crc kubenswrapper[4954]: E1128 16:24:33.133032 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b432f8e-16f5-44e4-97e8-eda21636fa41" containerName="storage" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.133051 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b432f8e-16f5-44e4-97e8-eda21636fa41" containerName="storage" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.133191 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b432f8e-16f5-44e4-97e8-eda21636fa41" containerName="storage" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.134142 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.136320 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.143842 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5"] Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.215597 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/180f36bc-8328-43c7-92fc-f3bf6deee7ef-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5\" (UID: \"180f36bc-8328-43c7-92fc-f3bf6deee7ef\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.215709 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltc4p\" (UniqueName: \"kubernetes.io/projected/180f36bc-8328-43c7-92fc-f3bf6deee7ef-kube-api-access-ltc4p\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5\" (UID: \"180f36bc-8328-43c7-92fc-f3bf6deee7ef\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.215750 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/180f36bc-8328-43c7-92fc-f3bf6deee7ef-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5\" (UID: \"180f36bc-8328-43c7-92fc-f3bf6deee7ef\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.316849 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ltc4p\" (UniqueName: \"kubernetes.io/projected/180f36bc-8328-43c7-92fc-f3bf6deee7ef-kube-api-access-ltc4p\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5\" (UID: \"180f36bc-8328-43c7-92fc-f3bf6deee7ef\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.316914 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/180f36bc-8328-43c7-92fc-f3bf6deee7ef-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5\" (UID: \"180f36bc-8328-43c7-92fc-f3bf6deee7ef\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.316961 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/180f36bc-8328-43c7-92fc-f3bf6deee7ef-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5\" (UID: \"180f36bc-8328-43c7-92fc-f3bf6deee7ef\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.317636 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/180f36bc-8328-43c7-92fc-f3bf6deee7ef-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5\" (UID: \"180f36bc-8328-43c7-92fc-f3bf6deee7ef\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.318138 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/180f36bc-8328-43c7-92fc-f3bf6deee7ef-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5\" (UID: \"180f36bc-8328-43c7-92fc-f3bf6deee7ef\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.353039 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ltc4p\" (UniqueName: \"kubernetes.io/projected/180f36bc-8328-43c7-92fc-f3bf6deee7ef-kube-api-access-ltc4p\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5\" (UID: \"180f36bc-8328-43c7-92fc-f3bf6deee7ef\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.450977 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" Nov 28 16:24:33 crc kubenswrapper[4954]: I1128 16:24:33.700241 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5"] Nov 28 16:24:34 crc kubenswrapper[4954]: I1128 16:24:34.514459 4954 generic.go:334] "Generic (PLEG): container finished" podID="180f36bc-8328-43c7-92fc-f3bf6deee7ef" containerID="e170c93a94d45129c9e7f8a5a040898317db5c6d1fbd397887bfcceca7300204" exitCode=0 Nov 28 16:24:34 crc kubenswrapper[4954]: I1128 16:24:34.514613 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" event={"ID":"180f36bc-8328-43c7-92fc-f3bf6deee7ef","Type":"ContainerDied","Data":"e170c93a94d45129c9e7f8a5a040898317db5c6d1fbd397887bfcceca7300204"} Nov 28 16:24:34 crc kubenswrapper[4954]: I1128 16:24:34.514851 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" event={"ID":"180f36bc-8328-43c7-92fc-f3bf6deee7ef","Type":"ContainerStarted","Data":"f09b4623f67d1667ea2d22178b04f9ce0d2f26c6f98319a13e76a19f2dcd8b86"} Nov 28 16:24:35 crc kubenswrapper[4954]: I1128 16:24:35.467450 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jdx7r"] Nov 28 16:24:35 crc kubenswrapper[4954]: I1128 16:24:35.469674 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jdx7r" Nov 28 16:24:35 crc kubenswrapper[4954]: I1128 16:24:35.482278 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jdx7r"] Nov 28 16:24:35 crc kubenswrapper[4954]: I1128 16:24:35.549078 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16789222-50f1-4060-808b-2915c3ef2ad9-catalog-content\") pod \"redhat-operators-jdx7r\" (UID: \"16789222-50f1-4060-808b-2915c3ef2ad9\") " pod="openshift-marketplace/redhat-operators-jdx7r" Nov 28 16:24:35 crc kubenswrapper[4954]: I1128 16:24:35.549259 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sl56\" (UniqueName: \"kubernetes.io/projected/16789222-50f1-4060-808b-2915c3ef2ad9-kube-api-access-5sl56\") pod \"redhat-operators-jdx7r\" (UID: \"16789222-50f1-4060-808b-2915c3ef2ad9\") " pod="openshift-marketplace/redhat-operators-jdx7r" Nov 28 16:24:35 crc kubenswrapper[4954]: I1128 16:24:35.549366 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16789222-50f1-4060-808b-2915c3ef2ad9-utilities\") pod \"redhat-operators-jdx7r\" (UID: \"16789222-50f1-4060-808b-2915c3ef2ad9\") " pod="openshift-marketplace/redhat-operators-jdx7r" Nov 28 16:24:35 crc kubenswrapper[4954]: I1128 16:24:35.650572 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16789222-50f1-4060-808b-2915c3ef2ad9-catalog-content\") pod \"redhat-operators-jdx7r\" (UID: \"16789222-50f1-4060-808b-2915c3ef2ad9\") " pod="openshift-marketplace/redhat-operators-jdx7r" Nov 28 16:24:35 crc kubenswrapper[4954]: I1128 16:24:35.650851 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sl56\" (UniqueName: \"kubernetes.io/projected/16789222-50f1-4060-808b-2915c3ef2ad9-kube-api-access-5sl56\") pod \"redhat-operators-jdx7r\" (UID: \"16789222-50f1-4060-808b-2915c3ef2ad9\") " pod="openshift-marketplace/redhat-operators-jdx7r" Nov 28 16:24:35 crc kubenswrapper[4954]: I1128 16:24:35.650958 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16789222-50f1-4060-808b-2915c3ef2ad9-utilities\") pod \"redhat-operators-jdx7r\" (UID: \"16789222-50f1-4060-808b-2915c3ef2ad9\") " pod="openshift-marketplace/redhat-operators-jdx7r" Nov 28 16:24:35 crc kubenswrapper[4954]: I1128 16:24:35.651247 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16789222-50f1-4060-808b-2915c3ef2ad9-catalog-content\") pod \"redhat-operators-jdx7r\" (UID: \"16789222-50f1-4060-808b-2915c3ef2ad9\") " pod="openshift-marketplace/redhat-operators-jdx7r" Nov 28 16:24:35 crc kubenswrapper[4954]: I1128 16:24:35.651382 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16789222-50f1-4060-808b-2915c3ef2ad9-utilities\") pod \"redhat-operators-jdx7r\" (UID: \"16789222-50f1-4060-808b-2915c3ef2ad9\") " pod="openshift-marketplace/redhat-operators-jdx7r" Nov 28 16:24:35 crc kubenswrapper[4954]: I1128 16:24:35.673807 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-5sl56\" (UniqueName: \"kubernetes.io/projected/16789222-50f1-4060-808b-2915c3ef2ad9-kube-api-access-5sl56\") pod \"redhat-operators-jdx7r\" (UID: \"16789222-50f1-4060-808b-2915c3ef2ad9\") " pod="openshift-marketplace/redhat-operators-jdx7r" Nov 28 16:24:35 crc kubenswrapper[4954]: I1128 16:24:35.787315 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jdx7r" Nov 28 16:24:36 crc kubenswrapper[4954]: I1128 16:24:36.222385 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jdx7r"] Nov 28 16:24:36 crc kubenswrapper[4954]: W1128 16:24:36.222602 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod16789222_50f1_4060_808b_2915c3ef2ad9.slice/crio-9c737026e15056e1d0e033d4d6ba25241ac60eeb59f575abaeaee040ce72a58d WatchSource:0}: Error finding container 9c737026e15056e1d0e033d4d6ba25241ac60eeb59f575abaeaee040ce72a58d: Status 404 returned error can't find the container with id 9c737026e15056e1d0e033d4d6ba25241ac60eeb59f575abaeaee040ce72a58d Nov 28 16:24:36 crc kubenswrapper[4954]: I1128 16:24:36.527319 4954 generic.go:334] "Generic (PLEG): container finished" podID="180f36bc-8328-43c7-92fc-f3bf6deee7ef" containerID="67b9fc4ccfbfe1f1d96623dc5f678fa23874e5ed7aae6d65853759a6ac3bf148" exitCode=0 Nov 28 16:24:36 crc kubenswrapper[4954]: I1128 16:24:36.527477 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" event={"ID":"180f36bc-8328-43c7-92fc-f3bf6deee7ef","Type":"ContainerDied","Data":"67b9fc4ccfbfe1f1d96623dc5f678fa23874e5ed7aae6d65853759a6ac3bf148"} Nov 28 16:24:36 crc kubenswrapper[4954]: I1128 16:24:36.529257 4954 generic.go:334] "Generic (PLEG): container finished" podID="16789222-50f1-4060-808b-2915c3ef2ad9" containerID="175e2021d071cbd9fcbec61c01250c8fba3f20137755c3bcdb9c39e74b154f5c" exitCode=0 Nov 28 16:24:36 crc kubenswrapper[4954]: I1128 16:24:36.529330 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jdx7r" event={"ID":"16789222-50f1-4060-808b-2915c3ef2ad9","Type":"ContainerDied","Data":"175e2021d071cbd9fcbec61c01250c8fba3f20137755c3bcdb9c39e74b154f5c"} Nov 28 16:24:36 crc kubenswrapper[4954]: I1128 16:24:36.529358 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jdx7r" event={"ID":"16789222-50f1-4060-808b-2915c3ef2ad9","Type":"ContainerStarted","Data":"9c737026e15056e1d0e033d4d6ba25241ac60eeb59f575abaeaee040ce72a58d"} Nov 28 16:24:37 crc kubenswrapper[4954]: I1128 16:24:37.538749 4954 generic.go:334] "Generic (PLEG): container finished" podID="180f36bc-8328-43c7-92fc-f3bf6deee7ef" containerID="6556486e3ae20f7d9b6243d5df3c8ba4f700d6bcef8491eef98b5759209ec917" exitCode=0 Nov 28 16:24:37 crc kubenswrapper[4954]: I1128 16:24:37.538852 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" event={"ID":"180f36bc-8328-43c7-92fc-f3bf6deee7ef","Type":"ContainerDied","Data":"6556486e3ae20f7d9b6243d5df3c8ba4f700d6bcef8491eef98b5759209ec917"} Nov 28 16:24:37 crc kubenswrapper[4954]: I1128 16:24:37.542355 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jdx7r" 
event={"ID":"16789222-50f1-4060-808b-2915c3ef2ad9","Type":"ContainerStarted","Data":"94d1bf1a023356724c72b03bf6f79c6098ce8a5f4b13be403f19a8f14f0d0ea7"} Nov 28 16:24:38 crc kubenswrapper[4954]: I1128 16:24:38.551873 4954 generic.go:334] "Generic (PLEG): container finished" podID="16789222-50f1-4060-808b-2915c3ef2ad9" containerID="94d1bf1a023356724c72b03bf6f79c6098ce8a5f4b13be403f19a8f14f0d0ea7" exitCode=0 Nov 28 16:24:38 crc kubenswrapper[4954]: I1128 16:24:38.552014 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jdx7r" event={"ID":"16789222-50f1-4060-808b-2915c3ef2ad9","Type":"ContainerDied","Data":"94d1bf1a023356724c72b03bf6f79c6098ce8a5f4b13be403f19a8f14f0d0ea7"} Nov 28 16:24:38 crc kubenswrapper[4954]: I1128 16:24:38.863990 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" Nov 28 16:24:38 crc kubenswrapper[4954]: I1128 16:24:38.898810 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ltc4p\" (UniqueName: \"kubernetes.io/projected/180f36bc-8328-43c7-92fc-f3bf6deee7ef-kube-api-access-ltc4p\") pod \"180f36bc-8328-43c7-92fc-f3bf6deee7ef\" (UID: \"180f36bc-8328-43c7-92fc-f3bf6deee7ef\") " Nov 28 16:24:38 crc kubenswrapper[4954]: I1128 16:24:38.899008 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/180f36bc-8328-43c7-92fc-f3bf6deee7ef-util\") pod \"180f36bc-8328-43c7-92fc-f3bf6deee7ef\" (UID: \"180f36bc-8328-43c7-92fc-f3bf6deee7ef\") " Nov 28 16:24:38 crc kubenswrapper[4954]: I1128 16:24:38.899086 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/180f36bc-8328-43c7-92fc-f3bf6deee7ef-bundle\") pod \"180f36bc-8328-43c7-92fc-f3bf6deee7ef\" (UID: \"180f36bc-8328-43c7-92fc-f3bf6deee7ef\") " Nov 28 16:24:38 crc kubenswrapper[4954]: I1128 16:24:38.899941 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/180f36bc-8328-43c7-92fc-f3bf6deee7ef-bundle" (OuterVolumeSpecName: "bundle") pod "180f36bc-8328-43c7-92fc-f3bf6deee7ef" (UID: "180f36bc-8328-43c7-92fc-f3bf6deee7ef"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:24:38 crc kubenswrapper[4954]: I1128 16:24:38.910770 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/180f36bc-8328-43c7-92fc-f3bf6deee7ef-kube-api-access-ltc4p" (OuterVolumeSpecName: "kube-api-access-ltc4p") pod "180f36bc-8328-43c7-92fc-f3bf6deee7ef" (UID: "180f36bc-8328-43c7-92fc-f3bf6deee7ef"). InnerVolumeSpecName "kube-api-access-ltc4p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:24:38 crc kubenswrapper[4954]: I1128 16:24:38.921041 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/180f36bc-8328-43c7-92fc-f3bf6deee7ef-util" (OuterVolumeSpecName: "util") pod "180f36bc-8328-43c7-92fc-f3bf6deee7ef" (UID: "180f36bc-8328-43c7-92fc-f3bf6deee7ef"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:24:39 crc kubenswrapper[4954]: I1128 16:24:39.000950 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ltc4p\" (UniqueName: \"kubernetes.io/projected/180f36bc-8328-43c7-92fc-f3bf6deee7ef-kube-api-access-ltc4p\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:39 crc kubenswrapper[4954]: I1128 16:24:39.000979 4954 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/180f36bc-8328-43c7-92fc-f3bf6deee7ef-util\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:39 crc kubenswrapper[4954]: I1128 16:24:39.000988 4954 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/180f36bc-8328-43c7-92fc-f3bf6deee7ef-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:39 crc kubenswrapper[4954]: I1128 16:24:39.560520 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jdx7r" event={"ID":"16789222-50f1-4060-808b-2915c3ef2ad9","Type":"ContainerStarted","Data":"41a197f1d03662957d1fed988b4dd42eb4bc664acb303aa2dbc9ac22d0ebae49"} Nov 28 16:24:39 crc kubenswrapper[4954]: I1128 16:24:39.571382 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5" event={"ID":"180f36bc-8328-43c7-92fc-f3bf6deee7ef","Type":"ContainerDied","Data":"f09b4623f67d1667ea2d22178b04f9ce0d2f26c6f98319a13e76a19f2dcd8b86"} Nov 28 16:24:39 crc kubenswrapper[4954]: I1128 16:24:39.571426 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f09b4623f67d1667ea2d22178b04f9ce0d2f26c6f98319a13e76a19f2dcd8b86" Nov 28 16:24:39 crc kubenswrapper[4954]: I1128 16:24:39.571569 4954 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 16:24:39 crc kubenswrapper[4954]: I1128 16:24:39.571569 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5"
Nov 28 16:24:39 crc kubenswrapper[4954]: I1128 16:24:39.591343 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jdx7r" podStartSLOduration=1.9197576710000002 podStartE2EDuration="4.591287424s" podCreationTimestamp="2025-11-28 16:24:35 +0000 UTC" firstStartedPulling="2025-11-28 16:24:36.530150613 +0000 UTC m=+829.921819154" lastFinishedPulling="2025-11-28 16:24:39.201680366 +0000 UTC m=+832.593348907" observedRunningTime="2025-11-28 16:24:39.587179565 +0000 UTC m=+832.978848126" watchObservedRunningTime="2025-11-28 16:24:39.591287424 +0000 UTC m=+832.982955985"
Nov 28 16:24:43 crc kubenswrapper[4954]: I1128 16:24:43.560855 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-pkc84"]
Nov 28 16:24:43 crc kubenswrapper[4954]: E1128 16:24:43.561592 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="180f36bc-8328-43c7-92fc-f3bf6deee7ef" containerName="util"
Nov 28 16:24:43 crc kubenswrapper[4954]: I1128 16:24:43.561605 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="180f36bc-8328-43c7-92fc-f3bf6deee7ef" containerName="util"
Nov 28 16:24:43 crc kubenswrapper[4954]: E1128 16:24:43.561616 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="180f36bc-8328-43c7-92fc-f3bf6deee7ef" containerName="extract"
Nov 28 16:24:43 crc kubenswrapper[4954]: I1128 16:24:43.561624 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="180f36bc-8328-43c7-92fc-f3bf6deee7ef" containerName="extract"
Nov 28 16:24:43 crc kubenswrapper[4954]: E1128 16:24:43.561637 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="180f36bc-8328-43c7-92fc-f3bf6deee7ef" containerName="pull"
Nov 28 16:24:43 crc kubenswrapper[4954]: I1128 16:24:43.561645 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="180f36bc-8328-43c7-92fc-f3bf6deee7ef" containerName="pull"
Nov 28 16:24:43 crc kubenswrapper[4954]: I1128 16:24:43.561753 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="180f36bc-8328-43c7-92fc-f3bf6deee7ef" containerName="extract"
Nov 28 16:24:43 crc kubenswrapper[4954]: I1128 16:24:43.562117 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-pkc84"
Nov 28 16:24:43 crc kubenswrapper[4954]: I1128 16:24:43.565032 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Nov 28 16:24:43 crc kubenswrapper[4954]: I1128 16:24:43.565296 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Nov 28 16:24:43 crc kubenswrapper[4954]: I1128 16:24:43.566593 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-hwmdk"
Nov 28 16:24:43 crc kubenswrapper[4954]: I1128 16:24:43.569353 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-pkc84"]
Nov 28 16:24:43 crc kubenswrapper[4954]: I1128 16:24:43.667031 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmbb2\" (UniqueName: \"kubernetes.io/projected/28db0f38-55d4-45de-b319-d85de1288c27-kube-api-access-fmbb2\") pod \"nmstate-operator-5b5b58f5c8-pkc84\" (UID: \"28db0f38-55d4-45de-b319-d85de1288c27\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-pkc84"
Nov 28 16:24:43 crc kubenswrapper[4954]: I1128 16:24:43.768015 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmbb2\" (UniqueName: \"kubernetes.io/projected/28db0f38-55d4-45de-b319-d85de1288c27-kube-api-access-fmbb2\") pod \"nmstate-operator-5b5b58f5c8-pkc84\" (UID: \"28db0f38-55d4-45de-b319-d85de1288c27\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-pkc84"
Nov 28 16:24:43 crc kubenswrapper[4954]: I1128 16:24:43.794323 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmbb2\" (UniqueName: \"kubernetes.io/projected/28db0f38-55d4-45de-b319-d85de1288c27-kube-api-access-fmbb2\") pod \"nmstate-operator-5b5b58f5c8-pkc84\" (UID: \"28db0f38-55d4-45de-b319-d85de1288c27\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-pkc84"
Nov 28 16:24:43 crc kubenswrapper[4954]: I1128 16:24:43.885848 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-pkc84"
Nov 28 16:24:44 crc kubenswrapper[4954]: I1128 16:24:44.109435 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-pkc84"]
Nov 28 16:24:44 crc kubenswrapper[4954]: I1128 16:24:44.604207 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-pkc84" event={"ID":"28db0f38-55d4-45de-b319-d85de1288c27","Type":"ContainerStarted","Data":"4e78f8fedda92f56c15ae8c7fc3fe3db4b7583510179bc7771147a3cefba3d0e"}
Nov 28 16:24:45 crc kubenswrapper[4954]: I1128 16:24:45.788260 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jdx7r"
Nov 28 16:24:45 crc kubenswrapper[4954]: I1128 16:24:45.788716 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jdx7r"
Nov 28 16:24:45 crc kubenswrapper[4954]: I1128 16:24:45.837183 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jdx7r"
Nov 28 16:24:46 crc kubenswrapper[4954]: I1128 16:24:46.621838 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-pkc84" event={"ID":"28db0f38-55d4-45de-b319-d85de1288c27","Type":"ContainerStarted","Data":"5d4b069d513bc71d245ae025f3943485710a31c7bafd0f3939821b264dd58b30"}
Nov 28 16:24:46 crc kubenswrapper[4954]: I1128 16:24:46.642187 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-pkc84" podStartSLOduration=1.346512136 podStartE2EDuration="3.64216484s" podCreationTimestamp="2025-11-28 16:24:43 +0000 UTC" firstStartedPulling="2025-11-28 16:24:44.113264225 +0000 UTC m=+837.504932766" lastFinishedPulling="2025-11-28 16:24:46.408916929 +0000 UTC m=+839.800585470" observedRunningTime="2025-11-28 16:24:46.637397421 +0000 UTC m=+840.029065982" watchObservedRunningTime="2025-11-28 16:24:46.64216484 +0000 UTC m=+840.033833381"
Nov 28 16:24:46 crc kubenswrapper[4954]: I1128 16:24:46.678153 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jdx7r"
Nov 28 16:24:48 crc kubenswrapper[4954]: I1128 16:24:48.447761 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jdx7r"]
Nov 28 16:24:48 crc kubenswrapper[4954]: I1128 16:24:48.633859 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jdx7r" podUID="16789222-50f1-4060-808b-2915c3ef2ad9" containerName="registry-server" containerID="cri-o://41a197f1d03662957d1fed988b4dd42eb4bc664acb303aa2dbc9ac22d0ebae49" gracePeriod=2
Nov 28 16:24:51 crc kubenswrapper[4954]: I1128 16:24:51.656352 4954 generic.go:334] "Generic (PLEG): container finished" podID="16789222-50f1-4060-808b-2915c3ef2ad9" containerID="41a197f1d03662957d1fed988b4dd42eb4bc664acb303aa2dbc9ac22d0ebae49" exitCode=0
Nov 28 16:24:51 crc kubenswrapper[4954]: I1128 16:24:51.656442 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jdx7r" event={"ID":"16789222-50f1-4060-808b-2915c3ef2ad9","Type":"ContainerDied","Data":"41a197f1d03662957d1fed988b4dd42eb4bc664acb303aa2dbc9ac22d0ebae49"}
Nov 28 16:24:52 crc kubenswrapper[4954]: I1128 16:24:52.745704 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jdx7r"
Nov 28 16:24:52 crc kubenswrapper[4954]: I1128 16:24:52.798275 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16789222-50f1-4060-808b-2915c3ef2ad9-utilities\") pod \"16789222-50f1-4060-808b-2915c3ef2ad9\" (UID: \"16789222-50f1-4060-808b-2915c3ef2ad9\") "
Nov 28 16:24:52 crc kubenswrapper[4954]: I1128 16:24:52.798378 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16789222-50f1-4060-808b-2915c3ef2ad9-catalog-content\") pod \"16789222-50f1-4060-808b-2915c3ef2ad9\" (UID: \"16789222-50f1-4060-808b-2915c3ef2ad9\") "
Nov 28 16:24:52 crc kubenswrapper[4954]: I1128 16:24:52.798417 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5sl56\" (UniqueName: \"kubernetes.io/projected/16789222-50f1-4060-808b-2915c3ef2ad9-kube-api-access-5sl56\") pod \"16789222-50f1-4060-808b-2915c3ef2ad9\" (UID: \"16789222-50f1-4060-808b-2915c3ef2ad9\") "
Nov 28 16:24:52 crc kubenswrapper[4954]: I1128 16:24:52.799118 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16789222-50f1-4060-808b-2915c3ef2ad9-utilities" (OuterVolumeSpecName: "utilities") pod "16789222-50f1-4060-808b-2915c3ef2ad9" (UID: "16789222-50f1-4060-808b-2915c3ef2ad9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:24:52 crc kubenswrapper[4954]: I1128 16:24:52.805300 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16789222-50f1-4060-808b-2915c3ef2ad9-kube-api-access-5sl56" (OuterVolumeSpecName: "kube-api-access-5sl56") pod "16789222-50f1-4060-808b-2915c3ef2ad9" (UID: "16789222-50f1-4060-808b-2915c3ef2ad9"). InnerVolumeSpecName "kube-api-access-5sl56". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:24:52 crc kubenswrapper[4954]: I1128 16:24:52.900102 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16789222-50f1-4060-808b-2915c3ef2ad9-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 16:24:52 crc kubenswrapper[4954]: I1128 16:24:52.900133 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5sl56\" (UniqueName: \"kubernetes.io/projected/16789222-50f1-4060-808b-2915c3ef2ad9-kube-api-access-5sl56\") on node \"crc\" DevicePath \"\""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.001174 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16789222-50f1-4060-808b-2915c3ef2ad9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.434459 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-vshc9"] Nov 28 16:24:53 crc kubenswrapper[4954]: E1128 16:24:53.434769 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16789222-50f1-4060-808b-2915c3ef2ad9" containerName="registry-server" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.434791 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="16789222-50f1-4060-808b-2915c3ef2ad9" containerName="registry-server" Nov 28 16:24:53 crc kubenswrapper[4954]: E1128 16:24:53.434809 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16789222-50f1-4060-808b-2915c3ef2ad9" containerName="extract-utilities" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.434817 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="16789222-50f1-4060-808b-2915c3ef2ad9" containerName="extract-utilities" Nov 28 16:24:53 crc kubenswrapper[4954]: E1128 16:24:53.434826 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16789222-50f1-4060-808b-2915c3ef2ad9" containerName="extract-content" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.434833 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="16789222-50f1-4060-808b-2915c3ef2ad9" containerName="extract-content" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.434953 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="16789222-50f1-4060-808b-2915c3ef2ad9" containerName="registry-server" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.435735 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vshc9" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.438838 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-bm9rz" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.451476 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-vshc9"] Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.458418 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2"] Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.459309 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.464288 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-vvd9d"] Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.465579 4954 util.go:30] "No sandbox for pod can be found. 
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.465579 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-vvd9d"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.466112 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.470102 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2"]
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.506003 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/4506960b-5eca-435d-acc7-4c306a47a327-nmstate-lock\") pod \"nmstate-handler-vvd9d\" (UID: \"4506960b-5eca-435d-acc7-4c306a47a327\") " pod="openshift-nmstate/nmstate-handler-vvd9d"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.506061 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/0f342ee6-8cab-41ec-b04e-61c4e241ef1a-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-b65g2\" (UID: \"0f342ee6-8cab-41ec-b04e-61c4e241ef1a\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.506086 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/4506960b-5eca-435d-acc7-4c306a47a327-ovs-socket\") pod \"nmstate-handler-vvd9d\" (UID: \"4506960b-5eca-435d-acc7-4c306a47a327\") " pod="openshift-nmstate/nmstate-handler-vvd9d"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.506137 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62sfs\" (UniqueName: \"kubernetes.io/projected/129fb511-5b79-4405-a5f2-c5b6028f4004-kube-api-access-62sfs\") pod \"nmstate-metrics-7f946cbc9-vshc9\" (UID: \"129fb511-5b79-4405-a5f2-c5b6028f4004\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vshc9"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.506163 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mclz\" (UniqueName: \"kubernetes.io/projected/4506960b-5eca-435d-acc7-4c306a47a327-kube-api-access-6mclz\") pod \"nmstate-handler-vvd9d\" (UID: \"4506960b-5eca-435d-acc7-4c306a47a327\") " pod="openshift-nmstate/nmstate-handler-vvd9d"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.506192 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/4506960b-5eca-435d-acc7-4c306a47a327-dbus-socket\") pod \"nmstate-handler-vvd9d\" (UID: \"4506960b-5eca-435d-acc7-4c306a47a327\") " pod="openshift-nmstate/nmstate-handler-vvd9d"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.506238 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gj2k\" (UniqueName: \"kubernetes.io/projected/0f342ee6-8cab-41ec-b04e-61c4e241ef1a-kube-api-access-7gj2k\") pod \"nmstate-webhook-5f6d4c5ccb-b65g2\" (UID: \"0f342ee6-8cab-41ec-b04e-61c4e241ef1a\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.572852 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp"]
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.577218 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.578778 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.581230 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-lz4c5"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.581253 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.585646 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp"]
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.607183 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/4506960b-5eca-435d-acc7-4c306a47a327-dbus-socket\") pod \"nmstate-handler-vvd9d\" (UID: \"4506960b-5eca-435d-acc7-4c306a47a327\") " pod="openshift-nmstate/nmstate-handler-vvd9d"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.607232 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/cad88f9f-405b-4de7-afab-7df439e60137-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-qx8zp\" (UID: \"cad88f9f-405b-4de7-afab-7df439e60137\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.607277 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gj2k\" (UniqueName: \"kubernetes.io/projected/0f342ee6-8cab-41ec-b04e-61c4e241ef1a-kube-api-access-7gj2k\") pod \"nmstate-webhook-5f6d4c5ccb-b65g2\" (UID: \"0f342ee6-8cab-41ec-b04e-61c4e241ef1a\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.607300 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/4506960b-5eca-435d-acc7-4c306a47a327-nmstate-lock\") pod \"nmstate-handler-vvd9d\" (UID: \"4506960b-5eca-435d-acc7-4c306a47a327\") " pod="openshift-nmstate/nmstate-handler-vvd9d"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.607330 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/0f342ee6-8cab-41ec-b04e-61c4e241ef1a-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-b65g2\" (UID: \"0f342ee6-8cab-41ec-b04e-61c4e241ef1a\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.607355 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/4506960b-5eca-435d-acc7-4c306a47a327-ovs-socket\") pod \"nmstate-handler-vvd9d\" (UID: \"4506960b-5eca-435d-acc7-4c306a47a327\") " pod="openshift-nmstate/nmstate-handler-vvd9d"
\"nmstate-console-plugin-7fbb5f6569-qx8zp\" (UID: \"cad88f9f-405b-4de7-afab-7df439e60137\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.607426 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62sfs\" (UniqueName: \"kubernetes.io/projected/129fb511-5b79-4405-a5f2-c5b6028f4004-kube-api-access-62sfs\") pod \"nmstate-metrics-7f946cbc9-vshc9\" (UID: \"129fb511-5b79-4405-a5f2-c5b6028f4004\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vshc9" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.607452 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bscpw\" (UniqueName: \"kubernetes.io/projected/cad88f9f-405b-4de7-afab-7df439e60137-kube-api-access-bscpw\") pod \"nmstate-console-plugin-7fbb5f6569-qx8zp\" (UID: \"cad88f9f-405b-4de7-afab-7df439e60137\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.607474 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mclz\" (UniqueName: \"kubernetes.io/projected/4506960b-5eca-435d-acc7-4c306a47a327-kube-api-access-6mclz\") pod \"nmstate-handler-vvd9d\" (UID: \"4506960b-5eca-435d-acc7-4c306a47a327\") " pod="openshift-nmstate/nmstate-handler-vvd9d" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.607586 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/4506960b-5eca-435d-acc7-4c306a47a327-dbus-socket\") pod \"nmstate-handler-vvd9d\" (UID: \"4506960b-5eca-435d-acc7-4c306a47a327\") " pod="openshift-nmstate/nmstate-handler-vvd9d" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.607685 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/4506960b-5eca-435d-acc7-4c306a47a327-ovs-socket\") pod \"nmstate-handler-vvd9d\" (UID: \"4506960b-5eca-435d-acc7-4c306a47a327\") " pod="openshift-nmstate/nmstate-handler-vvd9d" Nov 28 16:24:53 crc kubenswrapper[4954]: E1128 16:24:53.607725 4954 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 28 16:24:53 crc kubenswrapper[4954]: E1128 16:24:53.607785 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f342ee6-8cab-41ec-b04e-61c4e241ef1a-tls-key-pair podName:0f342ee6-8cab-41ec-b04e-61c4e241ef1a nodeName:}" failed. No retries permitted until 2025-11-28 16:24:54.107762983 +0000 UTC m=+847.499431604 (durationBeforeRetry 500ms). 
Nov 28 16:24:53 crc kubenswrapper[4954]: E1128 16:24:53.607785 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f342ee6-8cab-41ec-b04e-61c4e241ef1a-tls-key-pair podName:0f342ee6-8cab-41ec-b04e-61c4e241ef1a nodeName:}" failed. No retries permitted until 2025-11-28 16:24:54.107762983 +0000 UTC m=+847.499431604 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/0f342ee6-8cab-41ec-b04e-61c4e241ef1a-tls-key-pair") pod "nmstate-webhook-5f6d4c5ccb-b65g2" (UID: "0f342ee6-8cab-41ec-b04e-61c4e241ef1a") : secret "openshift-nmstate-webhook" not found
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.607810 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/4506960b-5eca-435d-acc7-4c306a47a327-nmstate-lock\") pod \"nmstate-handler-vvd9d\" (UID: \"4506960b-5eca-435d-acc7-4c306a47a327\") " pod="openshift-nmstate/nmstate-handler-vvd9d"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.625299 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mclz\" (UniqueName: \"kubernetes.io/projected/4506960b-5eca-435d-acc7-4c306a47a327-kube-api-access-6mclz\") pod \"nmstate-handler-vvd9d\" (UID: \"4506960b-5eca-435d-acc7-4c306a47a327\") " pod="openshift-nmstate/nmstate-handler-vvd9d"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.625875 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62sfs\" (UniqueName: \"kubernetes.io/projected/129fb511-5b79-4405-a5f2-c5b6028f4004-kube-api-access-62sfs\") pod \"nmstate-metrics-7f946cbc9-vshc9\" (UID: \"129fb511-5b79-4405-a5f2-c5b6028f4004\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vshc9"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.629341 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gj2k\" (UniqueName: \"kubernetes.io/projected/0f342ee6-8cab-41ec-b04e-61c4e241ef1a-kube-api-access-7gj2k\") pod \"nmstate-webhook-5f6d4c5ccb-b65g2\" (UID: \"0f342ee6-8cab-41ec-b04e-61c4e241ef1a\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.668079 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jdx7r" event={"ID":"16789222-50f1-4060-808b-2915c3ef2ad9","Type":"ContainerDied","Data":"9c737026e15056e1d0e033d4d6ba25241ac60eeb59f575abaeaee040ce72a58d"}
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.668133 4954 scope.go:117] "RemoveContainer" containerID="41a197f1d03662957d1fed988b4dd42eb4bc664acb303aa2dbc9ac22d0ebae49"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.668256 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jdx7r"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.690875 4954 scope.go:117] "RemoveContainer" containerID="94d1bf1a023356724c72b03bf6f79c6098ce8a5f4b13be403f19a8f14f0d0ea7"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.707400 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jdx7r"]
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.708167 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bscpw\" (UniqueName: \"kubernetes.io/projected/cad88f9f-405b-4de7-afab-7df439e60137-kube-api-access-bscpw\") pod \"nmstate-console-plugin-7fbb5f6569-qx8zp\" (UID: \"cad88f9f-405b-4de7-afab-7df439e60137\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.708224 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/cad88f9f-405b-4de7-afab-7df439e60137-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-qx8zp\" (UID: \"cad88f9f-405b-4de7-afab-7df439e60137\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.708310 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/cad88f9f-405b-4de7-afab-7df439e60137-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-qx8zp\" (UID: \"cad88f9f-405b-4de7-afab-7df439e60137\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp"
Nov 28 16:24:53 crc kubenswrapper[4954]: E1128 16:24:53.708481 4954 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found
Nov 28 16:24:53 crc kubenswrapper[4954]: E1128 16:24:53.708553 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cad88f9f-405b-4de7-afab-7df439e60137-plugin-serving-cert podName:cad88f9f-405b-4de7-afab-7df439e60137 nodeName:}" failed. No retries permitted until 2025-11-28 16:24:54.208537272 +0000 UTC m=+847.600205813 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/cad88f9f-405b-4de7-afab-7df439e60137-plugin-serving-cert") pod "nmstate-console-plugin-7fbb5f6569-qx8zp" (UID: "cad88f9f-405b-4de7-afab-7df439e60137") : secret "plugin-serving-cert" not found
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.710049 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/cad88f9f-405b-4de7-afab-7df439e60137-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-qx8zp\" (UID: \"cad88f9f-405b-4de7-afab-7df439e60137\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.711799 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jdx7r"]
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.714778 4954 scope.go:117] "RemoveContainer" containerID="175e2021d071cbd9fcbec61c01250c8fba3f20137755c3bcdb9c39e74b154f5c"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.732016 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bscpw\" (UniqueName: \"kubernetes.io/projected/cad88f9f-405b-4de7-afab-7df439e60137-kube-api-access-bscpw\") pod \"nmstate-console-plugin-7fbb5f6569-qx8zp\" (UID: \"cad88f9f-405b-4de7-afab-7df439e60137\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.752817 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vshc9"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.775354 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7cfc4f8755-4bfx4"]
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.780198 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7cfc4f8755-4bfx4"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.800325 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7cfc4f8755-4bfx4"]
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.801863 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-vvd9d"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.810431 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/65931153-3a0f-4fb6-8c95-dfe0c469ff95-service-ca\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.810573 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8sk2\" (UniqueName: \"kubernetes.io/projected/65931153-3a0f-4fb6-8c95-dfe0c469ff95-kube-api-access-w8sk2\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.810695 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/65931153-3a0f-4fb6-8c95-dfe0c469ff95-oauth-serving-cert\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.810775 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/65931153-3a0f-4fb6-8c95-dfe0c469ff95-console-config\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.810856 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/65931153-3a0f-4fb6-8c95-dfe0c469ff95-console-oauth-config\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.810925 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65931153-3a0f-4fb6-8c95-dfe0c469ff95-trusted-ca-bundle\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.811012 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/65931153-3a0f-4fb6-8c95-dfe0c469ff95-console-serving-cert\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4"
Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.868277 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16789222-50f1-4060-808b-2915c3ef2ad9" path="/var/lib/kubelet/pods/16789222-50f1-4060-808b-2915c3ef2ad9/volumes"
\"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.915986 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65931153-3a0f-4fb6-8c95-dfe0c469ff95-trusted-ca-bundle\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.916025 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/65931153-3a0f-4fb6-8c95-dfe0c469ff95-console-serving-cert\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.916057 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/65931153-3a0f-4fb6-8c95-dfe0c469ff95-service-ca\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.916077 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8sk2\" (UniqueName: \"kubernetes.io/projected/65931153-3a0f-4fb6-8c95-dfe0c469ff95-kube-api-access-w8sk2\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.916135 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/65931153-3a0f-4fb6-8c95-dfe0c469ff95-oauth-serving-cert\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.916153 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/65931153-3a0f-4fb6-8c95-dfe0c469ff95-console-config\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.918022 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/65931153-3a0f-4fb6-8c95-dfe0c469ff95-console-config\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.918043 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/65931153-3a0f-4fb6-8c95-dfe0c469ff95-oauth-serving-cert\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.918125 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65931153-3a0f-4fb6-8c95-dfe0c469ff95-trusted-ca-bundle\") pod \"console-7cfc4f8755-4bfx4\" (UID: 
\"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.918145 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/65931153-3a0f-4fb6-8c95-dfe0c469ff95-service-ca\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.921255 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/65931153-3a0f-4fb6-8c95-dfe0c469ff95-console-serving-cert\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.924322 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/65931153-3a0f-4fb6-8c95-dfe0c469ff95-console-oauth-config\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:24:53 crc kubenswrapper[4954]: I1128 16:24:53.939787 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8sk2\" (UniqueName: \"kubernetes.io/projected/65931153-3a0f-4fb6-8c95-dfe0c469ff95-kube-api-access-w8sk2\") pod \"console-7cfc4f8755-4bfx4\" (UID: \"65931153-3a0f-4fb6-8c95-dfe0c469ff95\") " pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.057897 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-vshc9"] Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.118241 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/0f342ee6-8cab-41ec-b04e-61c4e241ef1a-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-b65g2\" (UID: \"0f342ee6-8cab-41ec-b04e-61c4e241ef1a\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2" Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.122107 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/0f342ee6-8cab-41ec-b04e-61c4e241ef1a-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-b65g2\" (UID: \"0f342ee6-8cab-41ec-b04e-61c4e241ef1a\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2" Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.134128 4954 util.go:30] "No sandbox for pod can be found. 
Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.134128 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7cfc4f8755-4bfx4"
Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.220262 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/cad88f9f-405b-4de7-afab-7df439e60137-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-qx8zp\" (UID: \"cad88f9f-405b-4de7-afab-7df439e60137\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp"
Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.224641 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/cad88f9f-405b-4de7-afab-7df439e60137-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-qx8zp\" (UID: \"cad88f9f-405b-4de7-afab-7df439e60137\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp"
Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.307688 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7cfc4f8755-4bfx4"]
Nov 28 16:24:54 crc kubenswrapper[4954]: W1128 16:24:54.312924 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod65931153_3a0f_4fb6_8c95_dfe0c469ff95.slice/crio-8b6ac2f86cee325e1e6bef76c04ebdef9b913d86d484eb42f385f08fa0647657 WatchSource:0}: Error finding container 8b6ac2f86cee325e1e6bef76c04ebdef9b913d86d484eb42f385f08fa0647657: Status 404 returned error can't find the container with id 8b6ac2f86cee325e1e6bef76c04ebdef9b913d86d484eb42f385f08fa0647657
Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.382448 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2"
Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.492864 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp"
Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.674648 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-vvd9d" event={"ID":"4506960b-5eca-435d-acc7-4c306a47a327","Type":"ContainerStarted","Data":"e172906d344f13b7924a0cc13844355c2848d6e56c36cb901019fbb0a8579774"}
Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.677460 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7cfc4f8755-4bfx4" event={"ID":"65931153-3a0f-4fb6-8c95-dfe0c469ff95","Type":"ContainerStarted","Data":"1dcbe444154808494df0e91d3ea528907cc383858345f2a2b5b0e998fc790f0c"}
Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.677488 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7cfc4f8755-4bfx4" event={"ID":"65931153-3a0f-4fb6-8c95-dfe0c469ff95","Type":"ContainerStarted","Data":"8b6ac2f86cee325e1e6bef76c04ebdef9b913d86d484eb42f385f08fa0647657"}
Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.679188 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vshc9" event={"ID":"129fb511-5b79-4405-a5f2-c5b6028f4004","Type":"ContainerStarted","Data":"e497c010e096fdcb56c6535cb4ab94a96c10a11cb02026f786a079b08e304004"}
Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.702128 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7cfc4f8755-4bfx4" podStartSLOduration=1.702084777 podStartE2EDuration="1.702084777s" podCreationTimestamp="2025-11-28 16:24:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:24:54.698116864 +0000 UTC m=+848.089785395" watchObservedRunningTime="2025-11-28 16:24:54.702084777 +0000 UTC m=+848.093753318"
Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.726476 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp"]
Nov 28 16:24:54 crc kubenswrapper[4954]: I1128 16:24:54.784995 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2"]
Nov 28 16:24:55 crc kubenswrapper[4954]: I1128 16:24:55.687628 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp" event={"ID":"cad88f9f-405b-4de7-afab-7df439e60137","Type":"ContainerStarted","Data":"4a32c79905f991a3926619e90074ba2cee1f732fb63962dd1cd2ad19f3a7e9c8"}
Nov 28 16:24:55 crc kubenswrapper[4954]: I1128 16:24:55.688744 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2" event={"ID":"0f342ee6-8cab-41ec-b04e-61c4e241ef1a","Type":"ContainerStarted","Data":"98d671e5f4cb84516c57ec9eb7c69f513eb56a95d8bef61eeb6c6f91fe39b01e"}
Nov 28 16:24:57 crc kubenswrapper[4954]: I1128 16:24:57.703236 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp" event={"ID":"cad88f9f-405b-4de7-afab-7df439e60137","Type":"ContainerStarted","Data":"f27ccbcc0964a7c5bc9b032263216b4097bc6e0e1d9eda362099bf9c434e100f"}
event={"ID":"4506960b-5eca-435d-acc7-4c306a47a327","Type":"ContainerStarted","Data":"eec3a17eeb1ea708fbf86276a88d5687845adc60d945a589c6ff51afa688501d"} Nov 28 16:24:57 crc kubenswrapper[4954]: I1128 16:24:57.704569 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-vvd9d" Nov 28 16:24:57 crc kubenswrapper[4954]: I1128 16:24:57.705701 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2" event={"ID":"0f342ee6-8cab-41ec-b04e-61c4e241ef1a","Type":"ContainerStarted","Data":"a0f6b473da93b573f4c3cad25a354c0bea88aa92ff962287984e903db33fcae3"} Nov 28 16:24:57 crc kubenswrapper[4954]: I1128 16:24:57.706086 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2" Nov 28 16:24:57 crc kubenswrapper[4954]: I1128 16:24:57.707697 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vshc9" event={"ID":"129fb511-5b79-4405-a5f2-c5b6028f4004","Type":"ContainerStarted","Data":"2005af7b76da8ec1bf9ab7f22a318b677df3842885ae9ca6d82430c368235cb0"} Nov 28 16:24:57 crc kubenswrapper[4954]: I1128 16:24:57.722030 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-qx8zp" podStartSLOduration=1.9854291640000001 podStartE2EDuration="4.72201208s" podCreationTimestamp="2025-11-28 16:24:53 +0000 UTC" firstStartedPulling="2025-11-28 16:24:54.741915162 +0000 UTC m=+848.133583703" lastFinishedPulling="2025-11-28 16:24:57.478498078 +0000 UTC m=+850.870166619" observedRunningTime="2025-11-28 16:24:57.720989148 +0000 UTC m=+851.112657699" watchObservedRunningTime="2025-11-28 16:24:57.72201208 +0000 UTC m=+851.113680621" Nov 28 16:24:57 crc kubenswrapper[4954]: I1128 16:24:57.737627 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2" podStartSLOduration=2.926830249 podStartE2EDuration="4.737606758s" podCreationTimestamp="2025-11-28 16:24:53 +0000 UTC" firstStartedPulling="2025-11-28 16:24:54.791382428 +0000 UTC m=+848.183050969" lastFinishedPulling="2025-11-28 16:24:56.602158897 +0000 UTC m=+849.993827478" observedRunningTime="2025-11-28 16:24:57.736282026 +0000 UTC m=+851.127950567" watchObservedRunningTime="2025-11-28 16:24:57.737606758 +0000 UTC m=+851.129275299" Nov 28 16:24:57 crc kubenswrapper[4954]: I1128 16:24:57.760706 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-vvd9d" podStartSLOduration=2.049883048 podStartE2EDuration="4.760687209s" podCreationTimestamp="2025-11-28 16:24:53 +0000 UTC" firstStartedPulling="2025-11-28 16:24:53.854303579 +0000 UTC m=+847.245972120" lastFinishedPulling="2025-11-28 16:24:56.56510769 +0000 UTC m=+849.956776281" observedRunningTime="2025-11-28 16:24:57.760151773 +0000 UTC m=+851.151820324" watchObservedRunningTime="2025-11-28 16:24:57.760687209 +0000 UTC m=+851.152355750" Nov 28 16:24:59 crc kubenswrapper[4954]: I1128 16:24:59.721595 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vshc9" event={"ID":"129fb511-5b79-4405-a5f2-c5b6028f4004","Type":"ContainerStarted","Data":"7f19e71a13a12cb1565ff350cccfc45facfa5cba49fa4457040e9d47ad33cd4d"} Nov 28 16:24:59 crc kubenswrapper[4954]: I1128 16:24:59.748302 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vshc9" podStartSLOduration=2.021262124 podStartE2EDuration="6.748282225s" podCreationTimestamp="2025-11-28 16:24:53 +0000 UTC" firstStartedPulling="2025-11-28 16:24:54.063795157 +0000 UTC m=+847.455463698" lastFinishedPulling="2025-11-28 16:24:58.790815228 +0000 UTC m=+852.182483799" observedRunningTime="2025-11-28 16:24:59.744423954 +0000 UTC m=+853.136092535" watchObservedRunningTime="2025-11-28 16:24:59.748282225 +0000 UTC m=+853.139950766" Nov 28 16:25:03 crc kubenswrapper[4954]: I1128 16:25:03.840645 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-vvd9d" Nov 28 16:25:04 crc kubenswrapper[4954]: I1128 16:25:04.135803 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:25:04 crc kubenswrapper[4954]: I1128 16:25:04.135883 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:25:04 crc kubenswrapper[4954]: I1128 16:25:04.143484 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:25:04 crc kubenswrapper[4954]: I1128 16:25:04.762242 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7cfc4f8755-4bfx4" Nov 28 16:25:04 crc kubenswrapper[4954]: I1128 16:25:04.836393 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-kw5b6"] Nov 28 16:25:14 crc kubenswrapper[4954]: I1128 16:25:14.390916 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-b65g2" Nov 28 16:25:28 crc kubenswrapper[4954]: I1128 16:25:28.884206 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n"] Nov 28 16:25:28 crc kubenswrapper[4954]: I1128 16:25:28.885690 4954 util.go:30] "No sandbox for pod can be found. 
Nov 28 16:25:28 crc kubenswrapper[4954]: I1128 16:25:28.885690 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n"
Nov 28 16:25:28 crc kubenswrapper[4954]: I1128 16:25:28.888239 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 28 16:25:28 crc kubenswrapper[4954]: I1128 16:25:28.896434 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n"]
Nov 28 16:25:28 crc kubenswrapper[4954]: I1128 16:25:28.942601 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68sfs\" (UniqueName: \"kubernetes.io/projected/0a285e7e-472d-473d-909c-c1c2de773b12-kube-api-access-68sfs\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n\" (UID: \"0a285e7e-472d-473d-909c-c1c2de773b12\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n"
Nov 28 16:25:28 crc kubenswrapper[4954]: I1128 16:25:28.943250 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0a285e7e-472d-473d-909c-c1c2de773b12-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n\" (UID: \"0a285e7e-472d-473d-909c-c1c2de773b12\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n"
Nov 28 16:25:28 crc kubenswrapper[4954]: I1128 16:25:28.943542 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0a285e7e-472d-473d-909c-c1c2de773b12-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n\" (UID: \"0a285e7e-472d-473d-909c-c1c2de773b12\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n"
Nov 28 16:25:29 crc kubenswrapper[4954]: I1128 16:25:29.045000 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0a285e7e-472d-473d-909c-c1c2de773b12-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n\" (UID: \"0a285e7e-472d-473d-909c-c1c2de773b12\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n"
Nov 28 16:25:29 crc kubenswrapper[4954]: I1128 16:25:29.045075 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0a285e7e-472d-473d-909c-c1c2de773b12-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n\" (UID: \"0a285e7e-472d-473d-909c-c1c2de773b12\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n"
Nov 28 16:25:29 crc kubenswrapper[4954]: I1128 16:25:29.045139 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-68sfs\" (UniqueName: \"kubernetes.io/projected/0a285e7e-472d-473d-909c-c1c2de773b12-kube-api-access-68sfs\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n\" (UID: \"0a285e7e-472d-473d-909c-c1c2de773b12\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n"
\"kubernetes.io/empty-dir/0a285e7e-472d-473d-909c-c1c2de773b12-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n\" (UID: \"0a285e7e-472d-473d-909c-c1c2de773b12\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n" Nov 28 16:25:29 crc kubenswrapper[4954]: I1128 16:25:29.046537 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0a285e7e-472d-473d-909c-c1c2de773b12-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n\" (UID: \"0a285e7e-472d-473d-909c-c1c2de773b12\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n" Nov 28 16:25:29 crc kubenswrapper[4954]: I1128 16:25:29.069667 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-68sfs\" (UniqueName: \"kubernetes.io/projected/0a285e7e-472d-473d-909c-c1c2de773b12-kube-api-access-68sfs\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n\" (UID: \"0a285e7e-472d-473d-909c-c1c2de773b12\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n" Nov 28 16:25:29 crc kubenswrapper[4954]: I1128 16:25:29.209417 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n" Nov 28 16:25:29 crc kubenswrapper[4954]: I1128 16:25:29.453749 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n"] Nov 28 16:25:29 crc kubenswrapper[4954]: I1128 16:25:29.876328 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-kw5b6" podUID="ecfd366b-1ead-4b0a-9591-1d69197fe1a3" containerName="console" containerID="cri-o://2bdb438f0a400a8d3593401dfb355444ad3eb99ef2849a67f081a3ff0e2d5278" gracePeriod=15 Nov 28 16:25:29 crc kubenswrapper[4954]: I1128 16:25:29.933629 4954 generic.go:334] "Generic (PLEG): container finished" podID="0a285e7e-472d-473d-909c-c1c2de773b12" containerID="6e206f25c0b546d7e5850777ebcfdc06b90710dddcea560c061af87a93a5e974" exitCode=0 Nov 28 16:25:29 crc kubenswrapper[4954]: I1128 16:25:29.933684 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n" event={"ID":"0a285e7e-472d-473d-909c-c1c2de773b12","Type":"ContainerDied","Data":"6e206f25c0b546d7e5850777ebcfdc06b90710dddcea560c061af87a93a5e974"} Nov 28 16:25:29 crc kubenswrapper[4954]: I1128 16:25:29.933737 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n" event={"ID":"0a285e7e-472d-473d-909c-c1c2de773b12","Type":"ContainerStarted","Data":"3fd0a784a4daba9cb5140f0145cbdfec1a4590b2eb5fb8139943090891d2d4c6"} Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.267353 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-kw5b6_ecfd366b-1ead-4b0a-9591-1d69197fe1a3/console/0.log" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.267440 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.363867 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-config\") pod \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.364012 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-service-ca\") pod \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.364077 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-oauth-serving-cert\") pod \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.364118 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-oauth-config\") pod \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.364183 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-trusted-ca-bundle\") pod \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.364230 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-serving-cert\") pod \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.364277 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69qvm\" (UniqueName: \"kubernetes.io/projected/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-kube-api-access-69qvm\") pod \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\" (UID: \"ecfd366b-1ead-4b0a-9591-1d69197fe1a3\") " Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.364887 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-config" (OuterVolumeSpecName: "console-config") pod "ecfd366b-1ead-4b0a-9591-1d69197fe1a3" (UID: "ecfd366b-1ead-4b0a-9591-1d69197fe1a3"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.365100 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-service-ca" (OuterVolumeSpecName: "service-ca") pod "ecfd366b-1ead-4b0a-9591-1d69197fe1a3" (UID: "ecfd366b-1ead-4b0a-9591-1d69197fe1a3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.365250 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "ecfd366b-1ead-4b0a-9591-1d69197fe1a3" (UID: "ecfd366b-1ead-4b0a-9591-1d69197fe1a3"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.365386 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "ecfd366b-1ead-4b0a-9591-1d69197fe1a3" (UID: "ecfd366b-1ead-4b0a-9591-1d69197fe1a3"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.370748 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-kube-api-access-69qvm" (OuterVolumeSpecName: "kube-api-access-69qvm") pod "ecfd366b-1ead-4b0a-9591-1d69197fe1a3" (UID: "ecfd366b-1ead-4b0a-9591-1d69197fe1a3"). InnerVolumeSpecName "kube-api-access-69qvm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.373080 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "ecfd366b-1ead-4b0a-9591-1d69197fe1a3" (UID: "ecfd366b-1ead-4b0a-9591-1d69197fe1a3"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.373362 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "ecfd366b-1ead-4b0a-9591-1d69197fe1a3" (UID: "ecfd366b-1ead-4b0a-9591-1d69197fe1a3"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.466593 4954 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.466639 4954 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.466658 4954 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.466678 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69qvm\" (UniqueName: \"kubernetes.io/projected/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-kube-api-access-69qvm\") on node \"crc\" DevicePath \"\"" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.466697 4954 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-console-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.466713 4954 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.466730 4954 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ecfd366b-1ead-4b0a-9591-1d69197fe1a3-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.943558 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-kw5b6_ecfd366b-1ead-4b0a-9591-1d69197fe1a3/console/0.log" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.943626 4954 generic.go:334] "Generic (PLEG): container finished" podID="ecfd366b-1ead-4b0a-9591-1d69197fe1a3" containerID="2bdb438f0a400a8d3593401dfb355444ad3eb99ef2849a67f081a3ff0e2d5278" exitCode=2 Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.943669 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-kw5b6" event={"ID":"ecfd366b-1ead-4b0a-9591-1d69197fe1a3","Type":"ContainerDied","Data":"2bdb438f0a400a8d3593401dfb355444ad3eb99ef2849a67f081a3ff0e2d5278"} Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.943708 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-kw5b6" event={"ID":"ecfd366b-1ead-4b0a-9591-1d69197fe1a3","Type":"ContainerDied","Data":"9a3b0967ef183bb34e14156ac1d66c858b0a2587bf173dd3bdebb91da69f4978"} Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.943730 4954 scope.go:117] "RemoveContainer" containerID="2bdb438f0a400a8d3593401dfb355444ad3eb99ef2849a67f081a3ff0e2d5278" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.943783 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-kw5b6" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.959089 4954 scope.go:117] "RemoveContainer" containerID="2bdb438f0a400a8d3593401dfb355444ad3eb99ef2849a67f081a3ff0e2d5278" Nov 28 16:25:30 crc kubenswrapper[4954]: E1128 16:25:30.959544 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2bdb438f0a400a8d3593401dfb355444ad3eb99ef2849a67f081a3ff0e2d5278\": container with ID starting with 2bdb438f0a400a8d3593401dfb355444ad3eb99ef2849a67f081a3ff0e2d5278 not found: ID does not exist" containerID="2bdb438f0a400a8d3593401dfb355444ad3eb99ef2849a67f081a3ff0e2d5278" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.959588 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2bdb438f0a400a8d3593401dfb355444ad3eb99ef2849a67f081a3ff0e2d5278"} err="failed to get container status \"2bdb438f0a400a8d3593401dfb355444ad3eb99ef2849a67f081a3ff0e2d5278\": rpc error: code = NotFound desc = could not find container \"2bdb438f0a400a8d3593401dfb355444ad3eb99ef2849a67f081a3ff0e2d5278\": container with ID starting with 2bdb438f0a400a8d3593401dfb355444ad3eb99ef2849a67f081a3ff0e2d5278 not found: ID does not exist" Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.984327 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-kw5b6"] Nov 28 16:25:30 crc kubenswrapper[4954]: I1128 16:25:30.987549 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-kw5b6"] Nov 28 16:25:31 crc kubenswrapper[4954]: I1128 16:25:31.868545 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ecfd366b-1ead-4b0a-9591-1d69197fe1a3" path="/var/lib/kubelet/pods/ecfd366b-1ead-4b0a-9591-1d69197fe1a3/volumes" Nov 28 16:25:31 crc kubenswrapper[4954]: I1128 16:25:31.957018 4954 generic.go:334] "Generic (PLEG): container finished" podID="0a285e7e-472d-473d-909c-c1c2de773b12" containerID="f140547fbb52de396a2a02b8964e0f1af50d999af896d5913bc99d5bced53677" exitCode=0 Nov 28 16:25:31 crc kubenswrapper[4954]: I1128 16:25:31.957085 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n" event={"ID":"0a285e7e-472d-473d-909c-c1c2de773b12","Type":"ContainerDied","Data":"f140547fbb52de396a2a02b8964e0f1af50d999af896d5913bc99d5bced53677"} Nov 28 16:25:32 crc kubenswrapper[4954]: I1128 16:25:32.968248 4954 generic.go:334] "Generic (PLEG): container finished" podID="0a285e7e-472d-473d-909c-c1c2de773b12" containerID="0ea891c891fa57c67addf45383d8b22f6249b43b07800c4dd441b53414499da9" exitCode=0 Nov 28 16:25:32 crc kubenswrapper[4954]: I1128 16:25:32.968377 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n" event={"ID":"0a285e7e-472d-473d-909c-c1c2de773b12","Type":"ContainerDied","Data":"0ea891c891fa57c67addf45383d8b22f6249b43b07800c4dd441b53414499da9"} Nov 28 16:25:34 crc kubenswrapper[4954]: I1128 16:25:34.222864 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n" Nov 28 16:25:34 crc kubenswrapper[4954]: I1128 16:25:34.322600 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0a285e7e-472d-473d-909c-c1c2de773b12-bundle\") pod \"0a285e7e-472d-473d-909c-c1c2de773b12\" (UID: \"0a285e7e-472d-473d-909c-c1c2de773b12\") " Nov 28 16:25:34 crc kubenswrapper[4954]: I1128 16:25:34.322678 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0a285e7e-472d-473d-909c-c1c2de773b12-util\") pod \"0a285e7e-472d-473d-909c-c1c2de773b12\" (UID: \"0a285e7e-472d-473d-909c-c1c2de773b12\") " Nov 28 16:25:34 crc kubenswrapper[4954]: I1128 16:25:34.322709 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-68sfs\" (UniqueName: \"kubernetes.io/projected/0a285e7e-472d-473d-909c-c1c2de773b12-kube-api-access-68sfs\") pod \"0a285e7e-472d-473d-909c-c1c2de773b12\" (UID: \"0a285e7e-472d-473d-909c-c1c2de773b12\") " Nov 28 16:25:34 crc kubenswrapper[4954]: I1128 16:25:34.324932 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a285e7e-472d-473d-909c-c1c2de773b12-bundle" (OuterVolumeSpecName: "bundle") pod "0a285e7e-472d-473d-909c-c1c2de773b12" (UID: "0a285e7e-472d-473d-909c-c1c2de773b12"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:25:34 crc kubenswrapper[4954]: I1128 16:25:34.331145 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a285e7e-472d-473d-909c-c1c2de773b12-kube-api-access-68sfs" (OuterVolumeSpecName: "kube-api-access-68sfs") pod "0a285e7e-472d-473d-909c-c1c2de773b12" (UID: "0a285e7e-472d-473d-909c-c1c2de773b12"). InnerVolumeSpecName "kube-api-access-68sfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:25:34 crc kubenswrapper[4954]: I1128 16:25:34.345048 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a285e7e-472d-473d-909c-c1c2de773b12-util" (OuterVolumeSpecName: "util") pod "0a285e7e-472d-473d-909c-c1c2de773b12" (UID: "0a285e7e-472d-473d-909c-c1c2de773b12"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:25:34 crc kubenswrapper[4954]: I1128 16:25:34.425910 4954 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0a285e7e-472d-473d-909c-c1c2de773b12-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:25:34 crc kubenswrapper[4954]: I1128 16:25:34.425957 4954 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0a285e7e-472d-473d-909c-c1c2de773b12-util\") on node \"crc\" DevicePath \"\"" Nov 28 16:25:34 crc kubenswrapper[4954]: I1128 16:25:34.425974 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-68sfs\" (UniqueName: \"kubernetes.io/projected/0a285e7e-472d-473d-909c-c1c2de773b12-kube-api-access-68sfs\") on node \"crc\" DevicePath \"\"" Nov 28 16:25:34 crc kubenswrapper[4954]: I1128 16:25:34.985904 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n" event={"ID":"0a285e7e-472d-473d-909c-c1c2de773b12","Type":"ContainerDied","Data":"3fd0a784a4daba9cb5140f0145cbdfec1a4590b2eb5fb8139943090891d2d4c6"} Nov 28 16:25:34 crc kubenswrapper[4954]: I1128 16:25:34.985947 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3fd0a784a4daba9cb5140f0145cbdfec1a4590b2eb5fb8139943090891d2d4c6" Nov 28 16:25:34 crc kubenswrapper[4954]: I1128 16:25:34.985975 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n" Nov 28 16:25:41 crc kubenswrapper[4954]: I1128 16:25:41.840861 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lg9mv"] Nov 28 16:25:41 crc kubenswrapper[4954]: E1128 16:25:41.841749 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a285e7e-472d-473d-909c-c1c2de773b12" containerName="util" Nov 28 16:25:41 crc kubenswrapper[4954]: I1128 16:25:41.841765 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a285e7e-472d-473d-909c-c1c2de773b12" containerName="util" Nov 28 16:25:41 crc kubenswrapper[4954]: E1128 16:25:41.841778 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a285e7e-472d-473d-909c-c1c2de773b12" containerName="extract" Nov 28 16:25:41 crc kubenswrapper[4954]: I1128 16:25:41.841786 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a285e7e-472d-473d-909c-c1c2de773b12" containerName="extract" Nov 28 16:25:41 crc kubenswrapper[4954]: E1128 16:25:41.841799 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a285e7e-472d-473d-909c-c1c2de773b12" containerName="pull" Nov 28 16:25:41 crc kubenswrapper[4954]: I1128 16:25:41.841806 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a285e7e-472d-473d-909c-c1c2de773b12" containerName="pull" Nov 28 16:25:41 crc kubenswrapper[4954]: E1128 16:25:41.841829 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecfd366b-1ead-4b0a-9591-1d69197fe1a3" containerName="console" Nov 28 16:25:41 crc kubenswrapper[4954]: I1128 16:25:41.841836 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecfd366b-1ead-4b0a-9591-1d69197fe1a3" containerName="console" Nov 28 16:25:41 crc kubenswrapper[4954]: I1128 16:25:41.841953 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecfd366b-1ead-4b0a-9591-1d69197fe1a3" containerName="console" Nov 28 16:25:41 crc 
Nov 28 16:25:41 crc kubenswrapper[4954]: I1128 16:25:41.842934 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lg9mv"
Nov 28 16:25:41 crc kubenswrapper[4954]: I1128 16:25:41.853595 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lg9mv"]
Nov 28 16:25:41 crc kubenswrapper[4954]: I1128 16:25:41.932483 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ded66232-6373-4951-9a72-406335b13f68-utilities\") pod \"certified-operators-lg9mv\" (UID: \"ded66232-6373-4951-9a72-406335b13f68\") " pod="openshift-marketplace/certified-operators-lg9mv"
Nov 28 16:25:41 crc kubenswrapper[4954]: I1128 16:25:41.932541 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27zpf\" (UniqueName: \"kubernetes.io/projected/ded66232-6373-4951-9a72-406335b13f68-kube-api-access-27zpf\") pod \"certified-operators-lg9mv\" (UID: \"ded66232-6373-4951-9a72-406335b13f68\") " pod="openshift-marketplace/certified-operators-lg9mv"
Nov 28 16:25:41 crc kubenswrapper[4954]: I1128 16:25:41.933120 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ded66232-6373-4951-9a72-406335b13f68-catalog-content\") pod \"certified-operators-lg9mv\" (UID: \"ded66232-6373-4951-9a72-406335b13f68\") " pod="openshift-marketplace/certified-operators-lg9mv"
Nov 28 16:25:42 crc kubenswrapper[4954]: I1128 16:25:42.034353 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ded66232-6373-4951-9a72-406335b13f68-utilities\") pod \"certified-operators-lg9mv\" (UID: \"ded66232-6373-4951-9a72-406335b13f68\") " pod="openshift-marketplace/certified-operators-lg9mv"
Nov 28 16:25:42 crc kubenswrapper[4954]: I1128 16:25:42.034660 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27zpf\" (UniqueName: \"kubernetes.io/projected/ded66232-6373-4951-9a72-406335b13f68-kube-api-access-27zpf\") pod \"certified-operators-lg9mv\" (UID: \"ded66232-6373-4951-9a72-406335b13f68\") " pod="openshift-marketplace/certified-operators-lg9mv"
Nov 28 16:25:42 crc kubenswrapper[4954]: I1128 16:25:42.034751 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ded66232-6373-4951-9a72-406335b13f68-catalog-content\") pod \"certified-operators-lg9mv\" (UID: \"ded66232-6373-4951-9a72-406335b13f68\") " pod="openshift-marketplace/certified-operators-lg9mv"
Nov 28 16:25:42 crc kubenswrapper[4954]: I1128 16:25:42.035265 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ded66232-6373-4951-9a72-406335b13f68-catalog-content\") pod \"certified-operators-lg9mv\" (UID: \"ded66232-6373-4951-9a72-406335b13f68\") " pod="openshift-marketplace/certified-operators-lg9mv"
Nov 28 16:25:42 crc kubenswrapper[4954]: I1128 16:25:42.035404 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ded66232-6373-4951-9a72-406335b13f68-utilities\") pod \"certified-operators-lg9mv\" (UID: \"ded66232-6373-4951-9a72-406335b13f68\") " pod="openshift-marketplace/certified-operators-lg9mv"
Nov 28 16:25:42 crc kubenswrapper[4954]: I1128 16:25:42.054467 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27zpf\" (UniqueName: \"kubernetes.io/projected/ded66232-6373-4951-9a72-406335b13f68-kube-api-access-27zpf\") pod \"certified-operators-lg9mv\" (UID: \"ded66232-6373-4951-9a72-406335b13f68\") " pod="openshift-marketplace/certified-operators-lg9mv"
Nov 28 16:25:42 crc kubenswrapper[4954]: I1128 16:25:42.158115 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lg9mv"
Nov 28 16:25:42 crc kubenswrapper[4954]: I1128 16:25:42.631390 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lg9mv"]
Nov 28 16:25:42 crc kubenswrapper[4954]: W1128 16:25:42.640880 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podded66232_6373_4951_9a72_406335b13f68.slice/crio-2debcb7f51474e70f9985aa297f9703edbbd129b74df69bfa183b1c4d3e85e52 WatchSource:0}: Error finding container 2debcb7f51474e70f9985aa297f9703edbbd129b74df69bfa183b1c4d3e85e52: Status 404 returned error can't find the container with id 2debcb7f51474e70f9985aa297f9703edbbd129b74df69bfa183b1c4d3e85e52
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.028245 4954 generic.go:334] "Generic (PLEG): container finished" podID="ded66232-6373-4951-9a72-406335b13f68" containerID="2dcb6eca6a0b1f97a5a981bc318242d31cd7f9105887b3ccbf4fa97e29fd4d02" exitCode=0
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.028312 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg9mv" event={"ID":"ded66232-6373-4951-9a72-406335b13f68","Type":"ContainerDied","Data":"2dcb6eca6a0b1f97a5a981bc318242d31cd7f9105887b3ccbf4fa97e29fd4d02"}
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.028483 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg9mv" event={"ID":"ded66232-6373-4951-9a72-406335b13f68","Type":"ContainerStarted","Data":"2debcb7f51474e70f9985aa297f9703edbbd129b74df69bfa183b1c4d3e85e52"}
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.483425 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n"]
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.484308 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n"
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.486832 4954 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert"
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.486837 4954 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-zb7q9"
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.487374 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt"
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.487775 4954 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert"
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.488584 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt"
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.541413 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n"]
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.567117 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcv72\" (UniqueName: \"kubernetes.io/projected/f60ce9cc-8da2-43ee-a9b9-ea24ed39a910-kube-api-access-dcv72\") pod \"metallb-operator-controller-manager-67f7cc84c-gqj2n\" (UID: \"f60ce9cc-8da2-43ee-a9b9-ea24ed39a910\") " pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n"
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.567183 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f60ce9cc-8da2-43ee-a9b9-ea24ed39a910-apiservice-cert\") pod \"metallb-operator-controller-manager-67f7cc84c-gqj2n\" (UID: \"f60ce9cc-8da2-43ee-a9b9-ea24ed39a910\") " pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n"
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.567411 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f60ce9cc-8da2-43ee-a9b9-ea24ed39a910-webhook-cert\") pod \"metallb-operator-controller-manager-67f7cc84c-gqj2n\" (UID: \"f60ce9cc-8da2-43ee-a9b9-ea24ed39a910\") " pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n"
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.668313 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f60ce9cc-8da2-43ee-a9b9-ea24ed39a910-webhook-cert\") pod \"metallb-operator-controller-manager-67f7cc84c-gqj2n\" (UID: \"f60ce9cc-8da2-43ee-a9b9-ea24ed39a910\") " pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n"
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.668404 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcv72\" (UniqueName: \"kubernetes.io/projected/f60ce9cc-8da2-43ee-a9b9-ea24ed39a910-kube-api-access-dcv72\") pod \"metallb-operator-controller-manager-67f7cc84c-gqj2n\" (UID: \"f60ce9cc-8da2-43ee-a9b9-ea24ed39a910\") " pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n"
Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.668446 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f60ce9cc-8da2-43ee-a9b9-ea24ed39a910-apiservice-cert\") pod \"metallb-operator-controller-manager-67f7cc84c-gqj2n\" (UID: \"f60ce9cc-8da2-43ee-a9b9-ea24ed39a910\") " pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n"
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f60ce9cc-8da2-43ee-a9b9-ea24ed39a910-apiservice-cert\") pod \"metallb-operator-controller-manager-67f7cc84c-gqj2n\" (UID: \"f60ce9cc-8da2-43ee-a9b9-ea24ed39a910\") " pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n" Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.675026 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f60ce9cc-8da2-43ee-a9b9-ea24ed39a910-apiservice-cert\") pod \"metallb-operator-controller-manager-67f7cc84c-gqj2n\" (UID: \"f60ce9cc-8da2-43ee-a9b9-ea24ed39a910\") " pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n" Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.675476 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f60ce9cc-8da2-43ee-a9b9-ea24ed39a910-webhook-cert\") pod \"metallb-operator-controller-manager-67f7cc84c-gqj2n\" (UID: \"f60ce9cc-8da2-43ee-a9b9-ea24ed39a910\") " pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n" Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.690364 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcv72\" (UniqueName: \"kubernetes.io/projected/f60ce9cc-8da2-43ee-a9b9-ea24ed39a910-kube-api-access-dcv72\") pod \"metallb-operator-controller-manager-67f7cc84c-gqj2n\" (UID: \"f60ce9cc-8da2-43ee-a9b9-ea24ed39a910\") " pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n" Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.799848 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n" Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.812230 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd"] Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.822503 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.828052 4954 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.828274 4954 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-vpcg2" Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.828379 4954 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.835852 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd"] Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.973075 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d80c8971-4919-44a7-9241-ad0095dbb820-apiservice-cert\") pod \"metallb-operator-webhook-server-54cdc5b6b6-5zldd\" (UID: \"d80c8971-4919-44a7-9241-ad0095dbb820\") " pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.973133 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mr5k7\" (UniqueName: \"kubernetes.io/projected/d80c8971-4919-44a7-9241-ad0095dbb820-kube-api-access-mr5k7\") pod \"metallb-operator-webhook-server-54cdc5b6b6-5zldd\" (UID: \"d80c8971-4919-44a7-9241-ad0095dbb820\") " pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" Nov 28 16:25:43 crc kubenswrapper[4954]: I1128 16:25:43.973185 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d80c8971-4919-44a7-9241-ad0095dbb820-webhook-cert\") pod \"metallb-operator-webhook-server-54cdc5b6b6-5zldd\" (UID: \"d80c8971-4919-44a7-9241-ad0095dbb820\") " pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" Nov 28 16:25:44 crc kubenswrapper[4954]: I1128 16:25:44.043719 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg9mv" event={"ID":"ded66232-6373-4951-9a72-406335b13f68","Type":"ContainerStarted","Data":"f97634cd4837d09ac78320821c31ddde8fa59afde7a6353ac4a40e921072410a"} Nov 28 16:25:44 crc kubenswrapper[4954]: I1128 16:25:44.073919 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d80c8971-4919-44a7-9241-ad0095dbb820-apiservice-cert\") pod \"metallb-operator-webhook-server-54cdc5b6b6-5zldd\" (UID: \"d80c8971-4919-44a7-9241-ad0095dbb820\") " pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" Nov 28 16:25:44 crc kubenswrapper[4954]: I1128 16:25:44.073962 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mr5k7\" (UniqueName: \"kubernetes.io/projected/d80c8971-4919-44a7-9241-ad0095dbb820-kube-api-access-mr5k7\") pod \"metallb-operator-webhook-server-54cdc5b6b6-5zldd\" (UID: \"d80c8971-4919-44a7-9241-ad0095dbb820\") " pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" Nov 28 16:25:44 crc kubenswrapper[4954]: I1128 16:25:44.073999 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" 
(UniqueName: \"kubernetes.io/secret/d80c8971-4919-44a7-9241-ad0095dbb820-webhook-cert\") pod \"metallb-operator-webhook-server-54cdc5b6b6-5zldd\" (UID: \"d80c8971-4919-44a7-9241-ad0095dbb820\") " pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" Nov 28 16:25:44 crc kubenswrapper[4954]: I1128 16:25:44.079995 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d80c8971-4919-44a7-9241-ad0095dbb820-webhook-cert\") pod \"metallb-operator-webhook-server-54cdc5b6b6-5zldd\" (UID: \"d80c8971-4919-44a7-9241-ad0095dbb820\") " pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" Nov 28 16:25:44 crc kubenswrapper[4954]: I1128 16:25:44.080896 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n"] Nov 28 16:25:44 crc kubenswrapper[4954]: I1128 16:25:44.081681 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d80c8971-4919-44a7-9241-ad0095dbb820-apiservice-cert\") pod \"metallb-operator-webhook-server-54cdc5b6b6-5zldd\" (UID: \"d80c8971-4919-44a7-9241-ad0095dbb820\") " pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" Nov 28 16:25:44 crc kubenswrapper[4954]: W1128 16:25:44.091393 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf60ce9cc_8da2_43ee_a9b9_ea24ed39a910.slice/crio-f027986d4543c4b7ff1d9ef92ff5086e9ebf1625b8ae69e50a1bf0789d09cfef WatchSource:0}: Error finding container f027986d4543c4b7ff1d9ef92ff5086e9ebf1625b8ae69e50a1bf0789d09cfef: Status 404 returned error can't find the container with id f027986d4543c4b7ff1d9ef92ff5086e9ebf1625b8ae69e50a1bf0789d09cfef Nov 28 16:25:44 crc kubenswrapper[4954]: I1128 16:25:44.101754 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mr5k7\" (UniqueName: \"kubernetes.io/projected/d80c8971-4919-44a7-9241-ad0095dbb820-kube-api-access-mr5k7\") pod \"metallb-operator-webhook-server-54cdc5b6b6-5zldd\" (UID: \"d80c8971-4919-44a7-9241-ad0095dbb820\") " pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" Nov 28 16:25:44 crc kubenswrapper[4954]: I1128 16:25:44.194474 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" Nov 28 16:25:44 crc kubenswrapper[4954]: I1128 16:25:44.396630 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd"] Nov 28 16:25:44 crc kubenswrapper[4954]: W1128 16:25:44.403577 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd80c8971_4919_44a7_9241_ad0095dbb820.slice/crio-75529b429a4a870b3e7c6bfef2b33e68b0398aa97aa41556b24981c7ed66e0a9 WatchSource:0}: Error finding container 75529b429a4a870b3e7c6bfef2b33e68b0398aa97aa41556b24981c7ed66e0a9: Status 404 returned error can't find the container with id 75529b429a4a870b3e7c6bfef2b33e68b0398aa97aa41556b24981c7ed66e0a9 Nov 28 16:25:45 crc kubenswrapper[4954]: I1128 16:25:45.051542 4954 generic.go:334] "Generic (PLEG): container finished" podID="ded66232-6373-4951-9a72-406335b13f68" containerID="f97634cd4837d09ac78320821c31ddde8fa59afde7a6353ac4a40e921072410a" exitCode=0 Nov 28 16:25:45 crc kubenswrapper[4954]: I1128 16:25:45.051634 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg9mv" event={"ID":"ded66232-6373-4951-9a72-406335b13f68","Type":"ContainerDied","Data":"f97634cd4837d09ac78320821c31ddde8fa59afde7a6353ac4a40e921072410a"} Nov 28 16:25:45 crc kubenswrapper[4954]: I1128 16:25:45.053326 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" event={"ID":"d80c8971-4919-44a7-9241-ad0095dbb820","Type":"ContainerStarted","Data":"75529b429a4a870b3e7c6bfef2b33e68b0398aa97aa41556b24981c7ed66e0a9"} Nov 28 16:25:45 crc kubenswrapper[4954]: I1128 16:25:45.054941 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n" event={"ID":"f60ce9cc-8da2-43ee-a9b9-ea24ed39a910","Type":"ContainerStarted","Data":"f027986d4543c4b7ff1d9ef92ff5086e9ebf1625b8ae69e50a1bf0789d09cfef"} Nov 28 16:25:46 crc kubenswrapper[4954]: I1128 16:25:46.847662 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fl7qg"] Nov 28 16:25:46 crc kubenswrapper[4954]: I1128 16:25:46.849673 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:25:46 crc kubenswrapper[4954]: I1128 16:25:46.861887 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fl7qg"] Nov 28 16:25:47 crc kubenswrapper[4954]: I1128 16:25:47.025410 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gr557\" (UniqueName: \"kubernetes.io/projected/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-kube-api-access-gr557\") pod \"community-operators-fl7qg\" (UID: \"b5e33eb0-5434-4ba1-adcc-8e275488bd9c\") " pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:25:47 crc kubenswrapper[4954]: I1128 16:25:47.025490 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-catalog-content\") pod \"community-operators-fl7qg\" (UID: \"b5e33eb0-5434-4ba1-adcc-8e275488bd9c\") " pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:25:47 crc kubenswrapper[4954]: I1128 16:25:47.025572 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-utilities\") pod \"community-operators-fl7qg\" (UID: \"b5e33eb0-5434-4ba1-adcc-8e275488bd9c\") " pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:25:47 crc kubenswrapper[4954]: I1128 16:25:47.136064 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-catalog-content\") pod \"community-operators-fl7qg\" (UID: \"b5e33eb0-5434-4ba1-adcc-8e275488bd9c\") " pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:25:47 crc kubenswrapper[4954]: I1128 16:25:47.136187 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-utilities\") pod \"community-operators-fl7qg\" (UID: \"b5e33eb0-5434-4ba1-adcc-8e275488bd9c\") " pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:25:47 crc kubenswrapper[4954]: I1128 16:25:47.136292 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gr557\" (UniqueName: \"kubernetes.io/projected/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-kube-api-access-gr557\") pod \"community-operators-fl7qg\" (UID: \"b5e33eb0-5434-4ba1-adcc-8e275488bd9c\") " pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:25:47 crc kubenswrapper[4954]: I1128 16:25:47.136742 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-catalog-content\") pod \"community-operators-fl7qg\" (UID: \"b5e33eb0-5434-4ba1-adcc-8e275488bd9c\") " pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:25:47 crc kubenswrapper[4954]: I1128 16:25:47.136838 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-utilities\") pod \"community-operators-fl7qg\" (UID: \"b5e33eb0-5434-4ba1-adcc-8e275488bd9c\") " pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:25:47 crc kubenswrapper[4954]: I1128 16:25:47.160836 4954 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gr557\" (UniqueName: \"kubernetes.io/projected/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-kube-api-access-gr557\") pod \"community-operators-fl7qg\" (UID: \"b5e33eb0-5434-4ba1-adcc-8e275488bd9c\") " pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:25:47 crc kubenswrapper[4954]: I1128 16:25:47.175622 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:25:48 crc kubenswrapper[4954]: I1128 16:25:48.385129 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fl7qg"] Nov 28 16:25:48 crc kubenswrapper[4954]: W1128 16:25:48.547063 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb5e33eb0_5434_4ba1_adcc_8e275488bd9c.slice/crio-1222689a7b78de285f8666e676c532147c2ae8a86d601ec247903f83d7753e40 WatchSource:0}: Error finding container 1222689a7b78de285f8666e676c532147c2ae8a86d601ec247903f83d7753e40: Status 404 returned error can't find the container with id 1222689a7b78de285f8666e676c532147c2ae8a86d601ec247903f83d7753e40 Nov 28 16:25:49 crc kubenswrapper[4954]: I1128 16:25:49.092006 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fl7qg" event={"ID":"b5e33eb0-5434-4ba1-adcc-8e275488bd9c","Type":"ContainerStarted","Data":"1222689a7b78de285f8666e676c532147c2ae8a86d601ec247903f83d7753e40"} Nov 28 16:25:50 crc kubenswrapper[4954]: I1128 16:25:50.438952 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rd6gg"] Nov 28 16:25:50 crc kubenswrapper[4954]: I1128 16:25:50.440263 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:25:50 crc kubenswrapper[4954]: I1128 16:25:50.456110 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rd6gg"] Nov 28 16:25:50 crc kubenswrapper[4954]: I1128 16:25:50.598440 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94d2z\" (UniqueName: \"kubernetes.io/projected/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-kube-api-access-94d2z\") pod \"redhat-marketplace-rd6gg\" (UID: \"d301eb21-71c9-4228-b7e7-b6d1c2b72e75\") " pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:25:50 crc kubenswrapper[4954]: I1128 16:25:50.598559 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-catalog-content\") pod \"redhat-marketplace-rd6gg\" (UID: \"d301eb21-71c9-4228-b7e7-b6d1c2b72e75\") " pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:25:50 crc kubenswrapper[4954]: I1128 16:25:50.598583 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-utilities\") pod \"redhat-marketplace-rd6gg\" (UID: \"d301eb21-71c9-4228-b7e7-b6d1c2b72e75\") " pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:25:50 crc kubenswrapper[4954]: I1128 16:25:50.700039 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94d2z\" (UniqueName: \"kubernetes.io/projected/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-kube-api-access-94d2z\") pod \"redhat-marketplace-rd6gg\" (UID: \"d301eb21-71c9-4228-b7e7-b6d1c2b72e75\") " pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:25:50 crc kubenswrapper[4954]: I1128 16:25:50.700132 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-catalog-content\") pod \"redhat-marketplace-rd6gg\" (UID: \"d301eb21-71c9-4228-b7e7-b6d1c2b72e75\") " pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:25:50 crc kubenswrapper[4954]: I1128 16:25:50.700149 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-utilities\") pod \"redhat-marketplace-rd6gg\" (UID: \"d301eb21-71c9-4228-b7e7-b6d1c2b72e75\") " pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:25:50 crc kubenswrapper[4954]: I1128 16:25:50.700704 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-catalog-content\") pod \"redhat-marketplace-rd6gg\" (UID: \"d301eb21-71c9-4228-b7e7-b6d1c2b72e75\") " pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:25:50 crc kubenswrapper[4954]: I1128 16:25:50.700767 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-utilities\") pod \"redhat-marketplace-rd6gg\" (UID: \"d301eb21-71c9-4228-b7e7-b6d1c2b72e75\") " pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:25:50 crc kubenswrapper[4954]: I1128 16:25:50.728623 4954 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-94d2z\" (UniqueName: \"kubernetes.io/projected/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-kube-api-access-94d2z\") pod \"redhat-marketplace-rd6gg\" (UID: \"d301eb21-71c9-4228-b7e7-b6d1c2b72e75\") " pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:25:50 crc kubenswrapper[4954]: I1128 16:25:50.756451 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:25:52 crc kubenswrapper[4954]: I1128 16:25:52.240153 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rd6gg"] Nov 28 16:25:53 crc kubenswrapper[4954]: I1128 16:25:53.120817 4954 generic.go:334] "Generic (PLEG): container finished" podID="b5e33eb0-5434-4ba1-adcc-8e275488bd9c" containerID="a98382957b277caad45723dd72e97ce0fc76b37982951837262c7e36146988fc" exitCode=0 Nov 28 16:25:53 crc kubenswrapper[4954]: I1128 16:25:53.120972 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fl7qg" event={"ID":"b5e33eb0-5434-4ba1-adcc-8e275488bd9c","Type":"ContainerDied","Data":"a98382957b277caad45723dd72e97ce0fc76b37982951837262c7e36146988fc"} Nov 28 16:25:53 crc kubenswrapper[4954]: I1128 16:25:53.123710 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n" event={"ID":"f60ce9cc-8da2-43ee-a9b9-ea24ed39a910","Type":"ContainerStarted","Data":"aa8ce4fd51ef76020627e2743cd458f2ad8e17652b29bf7b212be1dceefce25e"} Nov 28 16:25:53 crc kubenswrapper[4954]: I1128 16:25:53.123847 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n" Nov 28 16:25:53 crc kubenswrapper[4954]: I1128 16:25:53.126352 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg9mv" event={"ID":"ded66232-6373-4951-9a72-406335b13f68","Type":"ContainerStarted","Data":"7e5903b374f4957f4e30a901bf2cceb1e1bf9c065f660233c78631a9eb7e9c3e"} Nov 28 16:25:53 crc kubenswrapper[4954]: I1128 16:25:53.128011 4954 generic.go:334] "Generic (PLEG): container finished" podID="d301eb21-71c9-4228-b7e7-b6d1c2b72e75" containerID="ce6a53ad3b5bf514cbba829c5590a35d7957d59db2d58a06812c9ba692e42e96" exitCode=0 Nov 28 16:25:53 crc kubenswrapper[4954]: I1128 16:25:53.128047 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rd6gg" event={"ID":"d301eb21-71c9-4228-b7e7-b6d1c2b72e75","Type":"ContainerDied","Data":"ce6a53ad3b5bf514cbba829c5590a35d7957d59db2d58a06812c9ba692e42e96"} Nov 28 16:25:53 crc kubenswrapper[4954]: I1128 16:25:53.128069 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rd6gg" event={"ID":"d301eb21-71c9-4228-b7e7-b6d1c2b72e75","Type":"ContainerStarted","Data":"09a7158b76a29c2d7ae43b1ca9d4e446019f132274518043dbb4e8fad66596f5"} Nov 28 16:25:53 crc kubenswrapper[4954]: I1128 16:25:53.167932 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lg9mv" podStartSLOduration=7.131780304 podStartE2EDuration="12.167913546s" podCreationTimestamp="2025-11-28 16:25:41 +0000 UTC" firstStartedPulling="2025-11-28 16:25:43.03096622 +0000 UTC m=+896.422634761" lastFinishedPulling="2025-11-28 16:25:48.067099442 +0000 UTC m=+901.458768003" observedRunningTime="2025-11-28 16:25:53.162413764 +0000 UTC m=+906.554082315" 
watchObservedRunningTime="2025-11-28 16:25:53.167913546 +0000 UTC m=+906.559582097" Nov 28 16:25:53 crc kubenswrapper[4954]: I1128 16:25:53.206419 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n" podStartSLOduration=2.32840079 podStartE2EDuration="10.206398869s" podCreationTimestamp="2025-11-28 16:25:43 +0000 UTC" firstStartedPulling="2025-11-28 16:25:44.095326448 +0000 UTC m=+897.486994989" lastFinishedPulling="2025-11-28 16:25:51.973324527 +0000 UTC m=+905.364993068" observedRunningTime="2025-11-28 16:25:53.199004448 +0000 UTC m=+906.590672999" watchObservedRunningTime="2025-11-28 16:25:53.206398869 +0000 UTC m=+906.598067420" Nov 28 16:26:01 crc kubenswrapper[4954]: I1128 16:26:01.171874 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" event={"ID":"d80c8971-4919-44a7-9241-ad0095dbb820","Type":"ContainerStarted","Data":"dd476ca41c1a7eb8b0ffeaf1908a8db2fdfb12e9fe28c34ae799ce3ab473d28a"} Nov 28 16:26:01 crc kubenswrapper[4954]: I1128 16:26:01.172310 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" Nov 28 16:26:01 crc kubenswrapper[4954]: I1128 16:26:01.194994 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" podStartSLOduration=2.6348369160000003 podStartE2EDuration="18.194979026s" podCreationTimestamp="2025-11-28 16:25:43 +0000 UTC" firstStartedPulling="2025-11-28 16:25:44.410257211 +0000 UTC m=+897.801925742" lastFinishedPulling="2025-11-28 16:25:59.970399311 +0000 UTC m=+913.362067852" observedRunningTime="2025-11-28 16:26:01.193878561 +0000 UTC m=+914.585547102" watchObservedRunningTime="2025-11-28 16:26:01.194979026 +0000 UTC m=+914.586647567" Nov 28 16:26:02 crc kubenswrapper[4954]: I1128 16:26:02.158971 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lg9mv" Nov 28 16:26:02 crc kubenswrapper[4954]: I1128 16:26:02.159326 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lg9mv" Nov 28 16:26:02 crc kubenswrapper[4954]: I1128 16:26:02.179203 4954 generic.go:334] "Generic (PLEG): container finished" podID="d301eb21-71c9-4228-b7e7-b6d1c2b72e75" containerID="e2dbcc00a400187a0277fcb0465db994bffbc3152052cb74f200cb05d4b8016f" exitCode=0 Nov 28 16:26:02 crc kubenswrapper[4954]: I1128 16:26:02.179266 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rd6gg" event={"ID":"d301eb21-71c9-4228-b7e7-b6d1c2b72e75","Type":"ContainerDied","Data":"e2dbcc00a400187a0277fcb0465db994bffbc3152052cb74f200cb05d4b8016f"} Nov 28 16:26:02 crc kubenswrapper[4954]: I1128 16:26:02.182912 4954 generic.go:334] "Generic (PLEG): container finished" podID="b5e33eb0-5434-4ba1-adcc-8e275488bd9c" containerID="793c00c93b99fa421107c8693bfd42ec58824ee41561953cedff13af97f06b4d" exitCode=0 Nov 28 16:26:02 crc kubenswrapper[4954]: I1128 16:26:02.183433 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fl7qg" event={"ID":"b5e33eb0-5434-4ba1-adcc-8e275488bd9c","Type":"ContainerDied","Data":"793c00c93b99fa421107c8693bfd42ec58824ee41561953cedff13af97f06b4d"} Nov 28 16:26:02 crc kubenswrapper[4954]: I1128 16:26:02.206789 4954 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lg9mv" Nov 28 16:26:02 crc kubenswrapper[4954]: I1128 16:26:02.258539 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lg9mv" Nov 28 16:26:02 crc kubenswrapper[4954]: I1128 16:26:02.481057 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:26:02 crc kubenswrapper[4954]: I1128 16:26:02.481128 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:26:04 crc kubenswrapper[4954]: I1128 16:26:04.198418 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rd6gg" event={"ID":"d301eb21-71c9-4228-b7e7-b6d1c2b72e75","Type":"ContainerStarted","Data":"9b9161263a2f3b6d212da9de7b744e7fe5d760427ad1b1d946fe722322754323"} Nov 28 16:26:04 crc kubenswrapper[4954]: I1128 16:26:04.201276 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fl7qg" event={"ID":"b5e33eb0-5434-4ba1-adcc-8e275488bd9c","Type":"ContainerStarted","Data":"a7ea913ced2778656d1ff791c31bf4e1d4dbc1b89a6493bc9ae940882356c11e"} Nov 28 16:26:04 crc kubenswrapper[4954]: I1128 16:26:04.225772 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rd6gg" podStartSLOduration=10.635965254 podStartE2EDuration="14.225752662s" podCreationTimestamp="2025-11-28 16:25:50 +0000 UTC" firstStartedPulling="2025-11-28 16:25:59.950120537 +0000 UTC m=+913.341789078" lastFinishedPulling="2025-11-28 16:26:03.539907945 +0000 UTC m=+916.931576486" observedRunningTime="2025-11-28 16:26:04.214897703 +0000 UTC m=+917.606566254" watchObservedRunningTime="2025-11-28 16:26:04.225752662 +0000 UTC m=+917.617421213" Nov 28 16:26:04 crc kubenswrapper[4954]: I1128 16:26:04.240843 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fl7qg" podStartSLOduration=14.41801192 podStartE2EDuration="18.240822484s" podCreationTimestamp="2025-11-28 16:25:46 +0000 UTC" firstStartedPulling="2025-11-28 16:25:59.950169839 +0000 UTC m=+913.341838380" lastFinishedPulling="2025-11-28 16:26:03.772980403 +0000 UTC m=+917.164648944" observedRunningTime="2025-11-28 16:26:04.235494007 +0000 UTC m=+917.627162558" watchObservedRunningTime="2025-11-28 16:26:04.240822484 +0000 UTC m=+917.632491025" Nov 28 16:26:07 crc kubenswrapper[4954]: I1128 16:26:07.176255 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:26:07 crc kubenswrapper[4954]: I1128 16:26:07.176880 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:26:07 crc kubenswrapper[4954]: I1128 16:26:07.256311 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:26:08 crc 
kubenswrapper[4954]: I1128 16:26:08.431721 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lg9mv"] Nov 28 16:26:08 crc kubenswrapper[4954]: I1128 16:26:08.432322 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lg9mv" podUID="ded66232-6373-4951-9a72-406335b13f68" containerName="registry-server" containerID="cri-o://7e5903b374f4957f4e30a901bf2cceb1e1bf9c065f660233c78631a9eb7e9c3e" gracePeriod=2 Nov 28 16:26:09 crc kubenswrapper[4954]: I1128 16:26:09.227755 4954 generic.go:334] "Generic (PLEG): container finished" podID="ded66232-6373-4951-9a72-406335b13f68" containerID="7e5903b374f4957f4e30a901bf2cceb1e1bf9c065f660233c78631a9eb7e9c3e" exitCode=0 Nov 28 16:26:09 crc kubenswrapper[4954]: I1128 16:26:09.227803 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg9mv" event={"ID":"ded66232-6373-4951-9a72-406335b13f68","Type":"ContainerDied","Data":"7e5903b374f4957f4e30a901bf2cceb1e1bf9c065f660233c78631a9eb7e9c3e"} Nov 28 16:26:09 crc kubenswrapper[4954]: I1128 16:26:09.417160 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lg9mv" Nov 28 16:26:09 crc kubenswrapper[4954]: I1128 16:26:09.540793 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ded66232-6373-4951-9a72-406335b13f68-utilities\") pod \"ded66232-6373-4951-9a72-406335b13f68\" (UID: \"ded66232-6373-4951-9a72-406335b13f68\") " Nov 28 16:26:09 crc kubenswrapper[4954]: I1128 16:26:09.540936 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-27zpf\" (UniqueName: \"kubernetes.io/projected/ded66232-6373-4951-9a72-406335b13f68-kube-api-access-27zpf\") pod \"ded66232-6373-4951-9a72-406335b13f68\" (UID: \"ded66232-6373-4951-9a72-406335b13f68\") " Nov 28 16:26:09 crc kubenswrapper[4954]: I1128 16:26:09.541013 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ded66232-6373-4951-9a72-406335b13f68-catalog-content\") pod \"ded66232-6373-4951-9a72-406335b13f68\" (UID: \"ded66232-6373-4951-9a72-406335b13f68\") " Nov 28 16:26:09 crc kubenswrapper[4954]: I1128 16:26:09.541587 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ded66232-6373-4951-9a72-406335b13f68-utilities" (OuterVolumeSpecName: "utilities") pod "ded66232-6373-4951-9a72-406335b13f68" (UID: "ded66232-6373-4951-9a72-406335b13f68"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:26:09 crc kubenswrapper[4954]: I1128 16:26:09.565295 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ded66232-6373-4951-9a72-406335b13f68-kube-api-access-27zpf" (OuterVolumeSpecName: "kube-api-access-27zpf") pod "ded66232-6373-4951-9a72-406335b13f68" (UID: "ded66232-6373-4951-9a72-406335b13f68"). InnerVolumeSpecName "kube-api-access-27zpf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:26:09 crc kubenswrapper[4954]: I1128 16:26:09.592236 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ded66232-6373-4951-9a72-406335b13f68-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ded66232-6373-4951-9a72-406335b13f68" (UID: "ded66232-6373-4951-9a72-406335b13f68"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:26:09 crc kubenswrapper[4954]: I1128 16:26:09.642205 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ded66232-6373-4951-9a72-406335b13f68-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:09 crc kubenswrapper[4954]: I1128 16:26:09.642239 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ded66232-6373-4951-9a72-406335b13f68-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:09 crc kubenswrapper[4954]: I1128 16:26:09.642254 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-27zpf\" (UniqueName: \"kubernetes.io/projected/ded66232-6373-4951-9a72-406335b13f68-kube-api-access-27zpf\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:10 crc kubenswrapper[4954]: I1128 16:26:10.238505 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg9mv" event={"ID":"ded66232-6373-4951-9a72-406335b13f68","Type":"ContainerDied","Data":"2debcb7f51474e70f9985aa297f9703edbbd129b74df69bfa183b1c4d3e85e52"} Nov 28 16:26:10 crc kubenswrapper[4954]: I1128 16:26:10.238579 4954 scope.go:117] "RemoveContainer" containerID="7e5903b374f4957f4e30a901bf2cceb1e1bf9c065f660233c78631a9eb7e9c3e" Nov 28 16:26:10 crc kubenswrapper[4954]: I1128 16:26:10.238711 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lg9mv" Nov 28 16:26:10 crc kubenswrapper[4954]: I1128 16:26:10.257267 4954 scope.go:117] "RemoveContainer" containerID="f97634cd4837d09ac78320821c31ddde8fa59afde7a6353ac4a40e921072410a" Nov 28 16:26:10 crc kubenswrapper[4954]: I1128 16:26:10.264022 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lg9mv"] Nov 28 16:26:10 crc kubenswrapper[4954]: I1128 16:26:10.269162 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lg9mv"] Nov 28 16:26:10 crc kubenswrapper[4954]: I1128 16:26:10.273674 4954 scope.go:117] "RemoveContainer" containerID="2dcb6eca6a0b1f97a5a981bc318242d31cd7f9105887b3ccbf4fa97e29fd4d02" Nov 28 16:26:10 crc kubenswrapper[4954]: I1128 16:26:10.756841 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:26:10 crc kubenswrapper[4954]: I1128 16:26:10.756911 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:26:10 crc kubenswrapper[4954]: I1128 16:26:10.824744 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:26:11 crc kubenswrapper[4954]: I1128 16:26:11.301850 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:26:11 crc kubenswrapper[4954]: I1128 16:26:11.864880 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ded66232-6373-4951-9a72-406335b13f68" path="/var/lib/kubelet/pods/ded66232-6373-4951-9a72-406335b13f68/volumes" Nov 28 16:26:14 crc kubenswrapper[4954]: I1128 16:26:14.200700 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-54cdc5b6b6-5zldd" Nov 28 16:26:14 crc kubenswrapper[4954]: I1128 16:26:14.630183 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rd6gg"] Nov 28 16:26:14 crc kubenswrapper[4954]: I1128 16:26:14.630434 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rd6gg" podUID="d301eb21-71c9-4228-b7e7-b6d1c2b72e75" containerName="registry-server" containerID="cri-o://9b9161263a2f3b6d212da9de7b744e7fe5d760427ad1b1d946fe722322754323" gracePeriod=2 Nov 28 16:26:15 crc kubenswrapper[4954]: I1128 16:26:15.284151 4954 generic.go:334] "Generic (PLEG): container finished" podID="d301eb21-71c9-4228-b7e7-b6d1c2b72e75" containerID="9b9161263a2f3b6d212da9de7b744e7fe5d760427ad1b1d946fe722322754323" exitCode=0 Nov 28 16:26:15 crc kubenswrapper[4954]: I1128 16:26:15.284232 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rd6gg" event={"ID":"d301eb21-71c9-4228-b7e7-b6d1c2b72e75","Type":"ContainerDied","Data":"9b9161263a2f3b6d212da9de7b744e7fe5d760427ad1b1d946fe722322754323"} Nov 28 16:26:15 crc kubenswrapper[4954]: I1128 16:26:15.656759 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:26:15 crc kubenswrapper[4954]: I1128 16:26:15.719165 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-catalog-content\") pod \"d301eb21-71c9-4228-b7e7-b6d1c2b72e75\" (UID: \"d301eb21-71c9-4228-b7e7-b6d1c2b72e75\") " Nov 28 16:26:15 crc kubenswrapper[4954]: I1128 16:26:15.719222 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-utilities\") pod \"d301eb21-71c9-4228-b7e7-b6d1c2b72e75\" (UID: \"d301eb21-71c9-4228-b7e7-b6d1c2b72e75\") " Nov 28 16:26:15 crc kubenswrapper[4954]: I1128 16:26:15.719241 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94d2z\" (UniqueName: \"kubernetes.io/projected/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-kube-api-access-94d2z\") pod \"d301eb21-71c9-4228-b7e7-b6d1c2b72e75\" (UID: \"d301eb21-71c9-4228-b7e7-b6d1c2b72e75\") " Nov 28 16:26:15 crc kubenswrapper[4954]: I1128 16:26:15.721113 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-utilities" (OuterVolumeSpecName: "utilities") pod "d301eb21-71c9-4228-b7e7-b6d1c2b72e75" (UID: "d301eb21-71c9-4228-b7e7-b6d1c2b72e75"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:26:15 crc kubenswrapper[4954]: I1128 16:26:15.727325 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-kube-api-access-94d2z" (OuterVolumeSpecName: "kube-api-access-94d2z") pod "d301eb21-71c9-4228-b7e7-b6d1c2b72e75" (UID: "d301eb21-71c9-4228-b7e7-b6d1c2b72e75"). InnerVolumeSpecName "kube-api-access-94d2z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:26:15 crc kubenswrapper[4954]: I1128 16:26:15.738717 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d301eb21-71c9-4228-b7e7-b6d1c2b72e75" (UID: "d301eb21-71c9-4228-b7e7-b6d1c2b72e75"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:26:15 crc kubenswrapper[4954]: I1128 16:26:15.820751 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:15 crc kubenswrapper[4954]: I1128 16:26:15.820805 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:15 crc kubenswrapper[4954]: I1128 16:26:15.820849 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94d2z\" (UniqueName: \"kubernetes.io/projected/d301eb21-71c9-4228-b7e7-b6d1c2b72e75-kube-api-access-94d2z\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:16 crc kubenswrapper[4954]: I1128 16:26:16.298080 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rd6gg" event={"ID":"d301eb21-71c9-4228-b7e7-b6d1c2b72e75","Type":"ContainerDied","Data":"09a7158b76a29c2d7ae43b1ca9d4e446019f132274518043dbb4e8fad66596f5"} Nov 28 16:26:16 crc kubenswrapper[4954]: I1128 16:26:16.298461 4954 scope.go:117] "RemoveContainer" containerID="9b9161263a2f3b6d212da9de7b744e7fe5d760427ad1b1d946fe722322754323" Nov 28 16:26:16 crc kubenswrapper[4954]: I1128 16:26:16.298619 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rd6gg" Nov 28 16:26:16 crc kubenswrapper[4954]: I1128 16:26:16.323758 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rd6gg"] Nov 28 16:26:16 crc kubenswrapper[4954]: I1128 16:26:16.328493 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rd6gg"] Nov 28 16:26:16 crc kubenswrapper[4954]: I1128 16:26:16.331518 4954 scope.go:117] "RemoveContainer" containerID="e2dbcc00a400187a0277fcb0465db994bffbc3152052cb74f200cb05d4b8016f" Nov 28 16:26:16 crc kubenswrapper[4954]: I1128 16:26:16.347299 4954 scope.go:117] "RemoveContainer" containerID="ce6a53ad3b5bf514cbba829c5590a35d7957d59db2d58a06812c9ba692e42e96" Nov 28 16:26:17 crc kubenswrapper[4954]: I1128 16:26:17.212459 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:26:17 crc kubenswrapper[4954]: I1128 16:26:17.865903 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d301eb21-71c9-4228-b7e7-b6d1c2b72e75" path="/var/lib/kubelet/pods/d301eb21-71c9-4228-b7e7-b6d1c2b72e75/volumes" Nov 28 16:26:21 crc kubenswrapper[4954]: I1128 16:26:21.033697 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fl7qg"] Nov 28 16:26:21 crc kubenswrapper[4954]: I1128 16:26:21.034300 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fl7qg" podUID="b5e33eb0-5434-4ba1-adcc-8e275488bd9c" containerName="registry-server" containerID="cri-o://a7ea913ced2778656d1ff791c31bf4e1d4dbc1b89a6493bc9ae940882356c11e" gracePeriod=2 Nov 28 16:26:21 crc kubenswrapper[4954]: I1128 16:26:21.913871 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.003217 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-catalog-content\") pod \"b5e33eb0-5434-4ba1-adcc-8e275488bd9c\" (UID: \"b5e33eb0-5434-4ba1-adcc-8e275488bd9c\") " Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.003329 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-utilities\") pod \"b5e33eb0-5434-4ba1-adcc-8e275488bd9c\" (UID: \"b5e33eb0-5434-4ba1-adcc-8e275488bd9c\") " Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.003393 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gr557\" (UniqueName: \"kubernetes.io/projected/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-kube-api-access-gr557\") pod \"b5e33eb0-5434-4ba1-adcc-8e275488bd9c\" (UID: \"b5e33eb0-5434-4ba1-adcc-8e275488bd9c\") " Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.007044 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-utilities" (OuterVolumeSpecName: "utilities") pod "b5e33eb0-5434-4ba1-adcc-8e275488bd9c" (UID: "b5e33eb0-5434-4ba1-adcc-8e275488bd9c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.009225 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-kube-api-access-gr557" (OuterVolumeSpecName: "kube-api-access-gr557") pod "b5e33eb0-5434-4ba1-adcc-8e275488bd9c" (UID: "b5e33eb0-5434-4ba1-adcc-8e275488bd9c"). InnerVolumeSpecName "kube-api-access-gr557". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.054181 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b5e33eb0-5434-4ba1-adcc-8e275488bd9c" (UID: "b5e33eb0-5434-4ba1-adcc-8e275488bd9c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.106123 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gr557\" (UniqueName: \"kubernetes.io/projected/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-kube-api-access-gr557\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.106172 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.106185 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b5e33eb0-5434-4ba1-adcc-8e275488bd9c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.332415 4954 generic.go:334] "Generic (PLEG): container finished" podID="b5e33eb0-5434-4ba1-adcc-8e275488bd9c" containerID="a7ea913ced2778656d1ff791c31bf4e1d4dbc1b89a6493bc9ae940882356c11e" exitCode=0 Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.332460 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fl7qg" event={"ID":"b5e33eb0-5434-4ba1-adcc-8e275488bd9c","Type":"ContainerDied","Data":"a7ea913ced2778656d1ff791c31bf4e1d4dbc1b89a6493bc9ae940882356c11e"} Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.332469 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fl7qg" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.332490 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fl7qg" event={"ID":"b5e33eb0-5434-4ba1-adcc-8e275488bd9c","Type":"ContainerDied","Data":"1222689a7b78de285f8666e676c532147c2ae8a86d601ec247903f83d7753e40"} Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.332517 4954 scope.go:117] "RemoveContainer" containerID="a7ea913ced2778656d1ff791c31bf4e1d4dbc1b89a6493bc9ae940882356c11e" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.357626 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fl7qg"] Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.357781 4954 scope.go:117] "RemoveContainer" containerID="793c00c93b99fa421107c8693bfd42ec58824ee41561953cedff13af97f06b4d" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.364946 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fl7qg"] Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.392501 4954 scope.go:117] "RemoveContainer" containerID="a98382957b277caad45723dd72e97ce0fc76b37982951837262c7e36146988fc" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.405461 4954 scope.go:117] "RemoveContainer" containerID="a7ea913ced2778656d1ff791c31bf4e1d4dbc1b89a6493bc9ae940882356c11e" Nov 28 16:26:22 crc kubenswrapper[4954]: E1128 16:26:22.406064 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7ea913ced2778656d1ff791c31bf4e1d4dbc1b89a6493bc9ae940882356c11e\": container with ID starting with a7ea913ced2778656d1ff791c31bf4e1d4dbc1b89a6493bc9ae940882356c11e not found: ID does not exist" containerID="a7ea913ced2778656d1ff791c31bf4e1d4dbc1b89a6493bc9ae940882356c11e" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.406193 
4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7ea913ced2778656d1ff791c31bf4e1d4dbc1b89a6493bc9ae940882356c11e"} err="failed to get container status \"a7ea913ced2778656d1ff791c31bf4e1d4dbc1b89a6493bc9ae940882356c11e\": rpc error: code = NotFound desc = could not find container \"a7ea913ced2778656d1ff791c31bf4e1d4dbc1b89a6493bc9ae940882356c11e\": container with ID starting with a7ea913ced2778656d1ff791c31bf4e1d4dbc1b89a6493bc9ae940882356c11e not found: ID does not exist" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.406216 4954 scope.go:117] "RemoveContainer" containerID="793c00c93b99fa421107c8693bfd42ec58824ee41561953cedff13af97f06b4d" Nov 28 16:26:22 crc kubenswrapper[4954]: E1128 16:26:22.406512 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"793c00c93b99fa421107c8693bfd42ec58824ee41561953cedff13af97f06b4d\": container with ID starting with 793c00c93b99fa421107c8693bfd42ec58824ee41561953cedff13af97f06b4d not found: ID does not exist" containerID="793c00c93b99fa421107c8693bfd42ec58824ee41561953cedff13af97f06b4d" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.406572 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"793c00c93b99fa421107c8693bfd42ec58824ee41561953cedff13af97f06b4d"} err="failed to get container status \"793c00c93b99fa421107c8693bfd42ec58824ee41561953cedff13af97f06b4d\": rpc error: code = NotFound desc = could not find container \"793c00c93b99fa421107c8693bfd42ec58824ee41561953cedff13af97f06b4d\": container with ID starting with 793c00c93b99fa421107c8693bfd42ec58824ee41561953cedff13af97f06b4d not found: ID does not exist" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.406602 4954 scope.go:117] "RemoveContainer" containerID="a98382957b277caad45723dd72e97ce0fc76b37982951837262c7e36146988fc" Nov 28 16:26:22 crc kubenswrapper[4954]: E1128 16:26:22.406993 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a98382957b277caad45723dd72e97ce0fc76b37982951837262c7e36146988fc\": container with ID starting with a98382957b277caad45723dd72e97ce0fc76b37982951837262c7e36146988fc not found: ID does not exist" containerID="a98382957b277caad45723dd72e97ce0fc76b37982951837262c7e36146988fc" Nov 28 16:26:22 crc kubenswrapper[4954]: I1128 16:26:22.407050 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a98382957b277caad45723dd72e97ce0fc76b37982951837262c7e36146988fc"} err="failed to get container status \"a98382957b277caad45723dd72e97ce0fc76b37982951837262c7e36146988fc\": rpc error: code = NotFound desc = could not find container \"a98382957b277caad45723dd72e97ce0fc76b37982951837262c7e36146988fc\": container with ID starting with a98382957b277caad45723dd72e97ce0fc76b37982951837262c7e36146988fc not found: ID does not exist" Nov 28 16:26:23 crc kubenswrapper[4954]: I1128 16:26:23.802438 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-67f7cc84c-gqj2n" Nov 28 16:26:23 crc kubenswrapper[4954]: I1128 16:26:23.878945 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5e33eb0-5434-4ba1-adcc-8e275488bd9c" path="/var/lib/kubelet/pods/b5e33eb0-5434-4ba1-adcc-8e275488bd9c/volumes" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.469558 4954 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-vxw8v"] Nov 28 16:26:24 crc kubenswrapper[4954]: E1128 16:26:24.469954 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5e33eb0-5434-4ba1-adcc-8e275488bd9c" containerName="extract-utilities" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.470014 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5e33eb0-5434-4ba1-adcc-8e275488bd9c" containerName="extract-utilities" Nov 28 16:26:24 crc kubenswrapper[4954]: E1128 16:26:24.470090 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d301eb21-71c9-4228-b7e7-b6d1c2b72e75" containerName="extract-utilities" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.470142 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d301eb21-71c9-4228-b7e7-b6d1c2b72e75" containerName="extract-utilities" Nov 28 16:26:24 crc kubenswrapper[4954]: E1128 16:26:24.470189 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d301eb21-71c9-4228-b7e7-b6d1c2b72e75" containerName="registry-server" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.470235 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d301eb21-71c9-4228-b7e7-b6d1c2b72e75" containerName="registry-server" Nov 28 16:26:24 crc kubenswrapper[4954]: E1128 16:26:24.470313 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ded66232-6373-4951-9a72-406335b13f68" containerName="extract-utilities" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.470366 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="ded66232-6373-4951-9a72-406335b13f68" containerName="extract-utilities" Nov 28 16:26:24 crc kubenswrapper[4954]: E1128 16:26:24.470415 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5e33eb0-5434-4ba1-adcc-8e275488bd9c" containerName="extract-content" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.470460 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5e33eb0-5434-4ba1-adcc-8e275488bd9c" containerName="extract-content" Nov 28 16:26:24 crc kubenswrapper[4954]: E1128 16:26:24.470511 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5e33eb0-5434-4ba1-adcc-8e275488bd9c" containerName="registry-server" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.470590 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5e33eb0-5434-4ba1-adcc-8e275488bd9c" containerName="registry-server" Nov 28 16:26:24 crc kubenswrapper[4954]: E1128 16:26:24.470647 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ded66232-6373-4951-9a72-406335b13f68" containerName="registry-server" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.470694 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="ded66232-6373-4951-9a72-406335b13f68" containerName="registry-server" Nov 28 16:26:24 crc kubenswrapper[4954]: E1128 16:26:24.470740 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d301eb21-71c9-4228-b7e7-b6d1c2b72e75" containerName="extract-content" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.470795 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d301eb21-71c9-4228-b7e7-b6d1c2b72e75" containerName="extract-content" Nov 28 16:26:24 crc kubenswrapper[4954]: E1128 16:26:24.470844 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ded66232-6373-4951-9a72-406335b13f68" containerName="extract-content" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.470899 4954 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="ded66232-6373-4951-9a72-406335b13f68" containerName="extract-content" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.471041 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="ded66232-6373-4951-9a72-406335b13f68" containerName="registry-server" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.471101 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5e33eb0-5434-4ba1-adcc-8e275488bd9c" containerName="registry-server" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.471157 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="d301eb21-71c9-4228-b7e7-b6d1c2b72e75" containerName="registry-server" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.472923 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.475283 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.475740 4954 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-zmw94" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.476296 4954 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.479414 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559"] Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.481049 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.483985 4954 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.494287 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559"] Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.583589 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-nb867"] Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.584647 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-nb867" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.588397 4954 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.590249 4954 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.590715 4954 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-4wr9r" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.594844 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.606481 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-pl6wr"] Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.607351 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-f8648f98b-pl6wr" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.611120 4954 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.625249 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-pl6wr"] Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.639557 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b5e787d2-d6a3-4e50-9a29-077809abbe6a-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-7q559\" (UID: \"b5e787d2-d6a3-4e50-9a29-077809abbe6a\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.639600 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb5e50bb-a885-4ae5-a7a8-d9214274540e-metrics-certs\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.639631 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/bb5e50bb-a885-4ae5-a7a8-d9214274540e-frr-sockets\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.639787 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/bb5e50bb-a885-4ae5-a7a8-d9214274540e-reloader\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.639840 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/bb5e50bb-a885-4ae5-a7a8-d9214274540e-frr-conf\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.639970 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lp9sg\" (UniqueName: \"kubernetes.io/projected/b5e787d2-d6a3-4e50-9a29-077809abbe6a-kube-api-access-lp9sg\") pod \"frr-k8s-webhook-server-7fcb986d4-7q559\" (UID: \"b5e787d2-d6a3-4e50-9a29-077809abbe6a\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.640012 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/bb5e50bb-a885-4ae5-a7a8-d9214274540e-frr-startup\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.640033 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/bb5e50bb-a885-4ae5-a7a8-d9214274540e-metrics\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: 
I1128 16:26:24.640050 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rp5d\" (UniqueName: \"kubernetes.io/projected/bb5e50bb-a885-4ae5-a7a8-d9214274540e-kube-api-access-9rp5d\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.741801 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cd7512cb-cf15-46bc-a4ce-a530e16c305e-cert\") pod \"controller-f8648f98b-pl6wr\" (UID: \"cd7512cb-cf15-46bc-a4ce-a530e16c305e\") " pod="metallb-system/controller-f8648f98b-pl6wr" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.741878 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b5e787d2-d6a3-4e50-9a29-077809abbe6a-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-7q559\" (UID: \"b5e787d2-d6a3-4e50-9a29-077809abbe6a\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.741912 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb5e50bb-a885-4ae5-a7a8-d9214274540e-metrics-certs\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.741951 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/bb5e50bb-a885-4ae5-a7a8-d9214274540e-frr-sockets\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.741979 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f616c2cc-42cf-47c5-88f7-608060ddcd9e-metrics-certs\") pod \"speaker-nb867\" (UID: \"f616c2cc-42cf-47c5-88f7-608060ddcd9e\") " pod="metallb-system/speaker-nb867" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.742013 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/bb5e50bb-a885-4ae5-a7a8-d9214274540e-reloader\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.742037 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/bb5e50bb-a885-4ae5-a7a8-d9214274540e-frr-conf\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.742060 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f616c2cc-42cf-47c5-88f7-608060ddcd9e-memberlist\") pod \"speaker-nb867\" (UID: \"f616c2cc-42cf-47c5-88f7-608060ddcd9e\") " pod="metallb-system/speaker-nb867" Nov 28 16:26:24 crc kubenswrapper[4954]: E1128 16:26:24.742077 4954 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.742107 4954 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/f616c2cc-42cf-47c5-88f7-608060ddcd9e-metallb-excludel2\") pod \"speaker-nb867\" (UID: \"f616c2cc-42cf-47c5-88f7-608060ddcd9e\") " pod="metallb-system/speaker-nb867" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.742128 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7z5k\" (UniqueName: \"kubernetes.io/projected/cd7512cb-cf15-46bc-a4ce-a530e16c305e-kube-api-access-p7z5k\") pod \"controller-f8648f98b-pl6wr\" (UID: \"cd7512cb-cf15-46bc-a4ce-a530e16c305e\") " pod="metallb-system/controller-f8648f98b-pl6wr" Nov 28 16:26:24 crc kubenswrapper[4954]: E1128 16:26:24.742162 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bb5e50bb-a885-4ae5-a7a8-d9214274540e-metrics-certs podName:bb5e50bb-a885-4ae5-a7a8-d9214274540e nodeName:}" failed. No retries permitted until 2025-11-28 16:26:25.242140942 +0000 UTC m=+938.633809483 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/bb5e50bb-a885-4ae5-a7a8-d9214274540e-metrics-certs") pod "frr-k8s-vxw8v" (UID: "bb5e50bb-a885-4ae5-a7a8-d9214274540e") : secret "frr-k8s-certs-secret" not found Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.742179 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lp9sg\" (UniqueName: \"kubernetes.io/projected/b5e787d2-d6a3-4e50-9a29-077809abbe6a-kube-api-access-lp9sg\") pod \"frr-k8s-webhook-server-7fcb986d4-7q559\" (UID: \"b5e787d2-d6a3-4e50-9a29-077809abbe6a\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.742202 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlfsv\" (UniqueName: \"kubernetes.io/projected/f616c2cc-42cf-47c5-88f7-608060ddcd9e-kube-api-access-mlfsv\") pod \"speaker-nb867\" (UID: \"f616c2cc-42cf-47c5-88f7-608060ddcd9e\") " pod="metallb-system/speaker-nb867" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.742224 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/bb5e50bb-a885-4ae5-a7a8-d9214274540e-frr-startup\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.742246 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/bb5e50bb-a885-4ae5-a7a8-d9214274540e-metrics\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.742270 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rp5d\" (UniqueName: \"kubernetes.io/projected/bb5e50bb-a885-4ae5-a7a8-d9214274540e-kube-api-access-9rp5d\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.742291 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/cd7512cb-cf15-46bc-a4ce-a530e16c305e-metrics-certs\") pod \"controller-f8648f98b-pl6wr\" (UID: \"cd7512cb-cf15-46bc-a4ce-a530e16c305e\") " pod="metallb-system/controller-f8648f98b-pl6wr" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.742497 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/bb5e50bb-a885-4ae5-a7a8-d9214274540e-frr-sockets\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.742557 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/bb5e50bb-a885-4ae5-a7a8-d9214274540e-reloader\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.742605 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/bb5e50bb-a885-4ae5-a7a8-d9214274540e-frr-conf\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.743395 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/bb5e50bb-a885-4ae5-a7a8-d9214274540e-frr-startup\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.743548 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/bb5e50bb-a885-4ae5-a7a8-d9214274540e-metrics\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.747445 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b5e787d2-d6a3-4e50-9a29-077809abbe6a-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-7q559\" (UID: \"b5e787d2-d6a3-4e50-9a29-077809abbe6a\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.764259 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rp5d\" (UniqueName: \"kubernetes.io/projected/bb5e50bb-a885-4ae5-a7a8-d9214274540e-kube-api-access-9rp5d\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.765213 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lp9sg\" (UniqueName: \"kubernetes.io/projected/b5e787d2-d6a3-4e50-9a29-077809abbe6a-kube-api-access-lp9sg\") pod \"frr-k8s-webhook-server-7fcb986d4-7q559\" (UID: \"b5e787d2-d6a3-4e50-9a29-077809abbe6a\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.796165 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.843654 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/f616c2cc-42cf-47c5-88f7-608060ddcd9e-metallb-excludel2\") pod \"speaker-nb867\" (UID: \"f616c2cc-42cf-47c5-88f7-608060ddcd9e\") " pod="metallb-system/speaker-nb867" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.843698 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7z5k\" (UniqueName: \"kubernetes.io/projected/cd7512cb-cf15-46bc-a4ce-a530e16c305e-kube-api-access-p7z5k\") pod \"controller-f8648f98b-pl6wr\" (UID: \"cd7512cb-cf15-46bc-a4ce-a530e16c305e\") " pod="metallb-system/controller-f8648f98b-pl6wr" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.843721 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlfsv\" (UniqueName: \"kubernetes.io/projected/f616c2cc-42cf-47c5-88f7-608060ddcd9e-kube-api-access-mlfsv\") pod \"speaker-nb867\" (UID: \"f616c2cc-42cf-47c5-88f7-608060ddcd9e\") " pod="metallb-system/speaker-nb867" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.843740 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cd7512cb-cf15-46bc-a4ce-a530e16c305e-metrics-certs\") pod \"controller-f8648f98b-pl6wr\" (UID: \"cd7512cb-cf15-46bc-a4ce-a530e16c305e\") " pod="metallb-system/controller-f8648f98b-pl6wr" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.843761 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cd7512cb-cf15-46bc-a4ce-a530e16c305e-cert\") pod \"controller-f8648f98b-pl6wr\" (UID: \"cd7512cb-cf15-46bc-a4ce-a530e16c305e\") " pod="metallb-system/controller-f8648f98b-pl6wr" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.843824 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f616c2cc-42cf-47c5-88f7-608060ddcd9e-metrics-certs\") pod \"speaker-nb867\" (UID: \"f616c2cc-42cf-47c5-88f7-608060ddcd9e\") " pod="metallb-system/speaker-nb867" Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.843849 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f616c2cc-42cf-47c5-88f7-608060ddcd9e-memberlist\") pod \"speaker-nb867\" (UID: \"f616c2cc-42cf-47c5-88f7-608060ddcd9e\") " pod="metallb-system/speaker-nb867" Nov 28 16:26:24 crc kubenswrapper[4954]: E1128 16:26:24.843954 4954 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 28 16:26:24 crc kubenswrapper[4954]: E1128 16:26:24.844000 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f616c2cc-42cf-47c5-88f7-608060ddcd9e-memberlist podName:f616c2cc-42cf-47c5-88f7-608060ddcd9e nodeName:}" failed. No retries permitted until 2025-11-28 16:26:25.343984357 +0000 UTC m=+938.735652898 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/f616c2cc-42cf-47c5-88f7-608060ddcd9e-memberlist") pod "speaker-nb867" (UID: "f616c2cc-42cf-47c5-88f7-608060ddcd9e") : secret "metallb-memberlist" not found
Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.844680 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/f616c2cc-42cf-47c5-88f7-608060ddcd9e-metallb-excludel2\") pod \"speaker-nb867\" (UID: \"f616c2cc-42cf-47c5-88f7-608060ddcd9e\") " pod="metallb-system/speaker-nb867"
Nov 28 16:26:24 crc kubenswrapper[4954]: E1128 16:26:24.844894 4954 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found
Nov 28 16:26:24 crc kubenswrapper[4954]: E1128 16:26:24.844980 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f616c2cc-42cf-47c5-88f7-608060ddcd9e-metrics-certs podName:f616c2cc-42cf-47c5-88f7-608060ddcd9e nodeName:}" failed. No retries permitted until 2025-11-28 16:26:25.344958807 +0000 UTC m=+938.736627408 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f616c2cc-42cf-47c5-88f7-608060ddcd9e-metrics-certs") pod "speaker-nb867" (UID: "f616c2cc-42cf-47c5-88f7-608060ddcd9e") : secret "speaker-certs-secret" not found
Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.847068 4954 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.852231 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cd7512cb-cf15-46bc-a4ce-a530e16c305e-metrics-certs\") pod \"controller-f8648f98b-pl6wr\" (UID: \"cd7512cb-cf15-46bc-a4ce-a530e16c305e\") " pod="metallb-system/controller-f8648f98b-pl6wr"
Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.863020 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7z5k\" (UniqueName: \"kubernetes.io/projected/cd7512cb-cf15-46bc-a4ce-a530e16c305e-kube-api-access-p7z5k\") pod \"controller-f8648f98b-pl6wr\" (UID: \"cd7512cb-cf15-46bc-a4ce-a530e16c305e\") " pod="metallb-system/controller-f8648f98b-pl6wr"
Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.864018 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cd7512cb-cf15-46bc-a4ce-a530e16c305e-cert\") pod \"controller-f8648f98b-pl6wr\" (UID: \"cd7512cb-cf15-46bc-a4ce-a530e16c305e\") " pod="metallb-system/controller-f8648f98b-pl6wr"
Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.865943 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlfsv\" (UniqueName: \"kubernetes.io/projected/f616c2cc-42cf-47c5-88f7-608060ddcd9e-kube-api-access-mlfsv\") pod \"speaker-nb867\" (UID: \"f616c2cc-42cf-47c5-88f7-608060ddcd9e\") " pod="metallb-system/speaker-nb867"
Nov 28 16:26:24 crc kubenswrapper[4954]: I1128 16:26:24.918832 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-pl6wr"
Nov 28 16:26:25 crc kubenswrapper[4954]: I1128 16:26:25.017396 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559"]
Nov 28 16:26:25 crc kubenswrapper[4954]: I1128 16:26:25.128554 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-pl6wr"]
Nov 28 16:26:25 crc kubenswrapper[4954]: W1128 16:26:25.134209 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcd7512cb_cf15_46bc_a4ce_a530e16c305e.slice/crio-d0eb6eed0692a34b3ff69a3123fd1c148d8bbc5272a65cd18ff429fa521339a6 WatchSource:0}: Error finding container d0eb6eed0692a34b3ff69a3123fd1c148d8bbc5272a65cd18ff429fa521339a6: Status 404 returned error can't find the container with id d0eb6eed0692a34b3ff69a3123fd1c148d8bbc5272a65cd18ff429fa521339a6
Nov 28 16:26:25 crc kubenswrapper[4954]: I1128 16:26:25.252164 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb5e50bb-a885-4ae5-a7a8-d9214274540e-metrics-certs\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v"
Nov 28 16:26:25 crc kubenswrapper[4954]: I1128 16:26:25.255942 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb5e50bb-a885-4ae5-a7a8-d9214274540e-metrics-certs\") pod \"frr-k8s-vxw8v\" (UID: \"bb5e50bb-a885-4ae5-a7a8-d9214274540e\") " pod="metallb-system/frr-k8s-vxw8v"
Nov 28 16:26:25 crc kubenswrapper[4954]: I1128 16:26:25.348468 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-pl6wr" event={"ID":"cd7512cb-cf15-46bc-a4ce-a530e16c305e","Type":"ContainerStarted","Data":"d0eb6eed0692a34b3ff69a3123fd1c148d8bbc5272a65cd18ff429fa521339a6"}
Nov 28 16:26:25 crc kubenswrapper[4954]: I1128 16:26:25.349105 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559" event={"ID":"b5e787d2-d6a3-4e50-9a29-077809abbe6a","Type":"ContainerStarted","Data":"92f53209010ae12054da8da53f871d5d41ef58fbd5af1e279eb041142f0a78be"}
Nov 28 16:26:25 crc kubenswrapper[4954]: I1128 16:26:25.352868 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f616c2cc-42cf-47c5-88f7-608060ddcd9e-metrics-certs\") pod \"speaker-nb867\" (UID: \"f616c2cc-42cf-47c5-88f7-608060ddcd9e\") " pod="metallb-system/speaker-nb867"
Nov 28 16:26:25 crc kubenswrapper[4954]: I1128 16:26:25.352908 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f616c2cc-42cf-47c5-88f7-608060ddcd9e-memberlist\") pod \"speaker-nb867\" (UID: \"f616c2cc-42cf-47c5-88f7-608060ddcd9e\") " pod="metallb-system/speaker-nb867"
Nov 28 16:26:25 crc kubenswrapper[4954]: E1128 16:26:25.353004 4954 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Nov 28 16:26:25 crc kubenswrapper[4954]: E1128 16:26:25.353049 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f616c2cc-42cf-47c5-88f7-608060ddcd9e-memberlist podName:f616c2cc-42cf-47c5-88f7-608060ddcd9e nodeName:}" failed. No retries permitted until 2025-11-28 16:26:26.353036286 +0000 UTC m=+939.744704817 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/f616c2cc-42cf-47c5-88f7-608060ddcd9e-memberlist") pod "speaker-nb867" (UID: "f616c2cc-42cf-47c5-88f7-608060ddcd9e") : secret "metallb-memberlist" not found
Nov 28 16:26:25 crc kubenswrapper[4954]: I1128 16:26:25.357667 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f616c2cc-42cf-47c5-88f7-608060ddcd9e-metrics-certs\") pod \"speaker-nb867\" (UID: \"f616c2cc-42cf-47c5-88f7-608060ddcd9e\") " pod="metallb-system/speaker-nb867"
Nov 28 16:26:25 crc kubenswrapper[4954]: I1128 16:26:25.386447 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-vxw8v"
Nov 28 16:26:26 crc kubenswrapper[4954]: I1128 16:26:26.365436 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f616c2cc-42cf-47c5-88f7-608060ddcd9e-memberlist\") pod \"speaker-nb867\" (UID: \"f616c2cc-42cf-47c5-88f7-608060ddcd9e\") " pod="metallb-system/speaker-nb867"
Nov 28 16:26:26 crc kubenswrapper[4954]: I1128 16:26:26.372048 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/f616c2cc-42cf-47c5-88f7-608060ddcd9e-memberlist\") pod \"speaker-nb867\" (UID: \"f616c2cc-42cf-47c5-88f7-608060ddcd9e\") " pod="metallb-system/speaker-nb867"
Nov 28 16:26:26 crc kubenswrapper[4954]: I1128 16:26:26.665324 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-nb867"
Nov 28 16:26:26 crc kubenswrapper[4954]: I1128 16:26:26.681737 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-pl6wr" event={"ID":"cd7512cb-cf15-46bc-a4ce-a530e16c305e","Type":"ContainerStarted","Data":"24d0619f87e54e810307a49bb5f06ee361bf7039cc98e54157f7d5b0e719c3fb"}
Nov 28 16:26:26 crc kubenswrapper[4954]: I1128 16:26:26.683326 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vxw8v" event={"ID":"bb5e50bb-a885-4ae5-a7a8-d9214274540e","Type":"ContainerStarted","Data":"433dc108b9c525496a362e654c747ecf916dde0770bf339dd07ba121697ad0ba"}
Nov 28 16:26:26 crc kubenswrapper[4954]: W1128 16:26:26.697941 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf616c2cc_42cf_47c5_88f7_608060ddcd9e.slice/crio-ebae2d9c6af2f21628182f41c27bc38241591a6940e451d5b3068e4ccf17a8f7 WatchSource:0}: Error finding container ebae2d9c6af2f21628182f41c27bc38241591a6940e451d5b3068e4ccf17a8f7: Status 404 returned error can't find the container with id ebae2d9c6af2f21628182f41c27bc38241591a6940e451d5b3068e4ccf17a8f7
Nov 28 16:26:27 crc kubenswrapper[4954]: I1128 16:26:27.689948 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-nb867" event={"ID":"f616c2cc-42cf-47c5-88f7-608060ddcd9e","Type":"ContainerStarted","Data":"3fdd17fc8a7fdf33942b19875d8cc2c4f215242b891dbcad63eb9d7c0745b8b4"}
Nov 28 16:26:27 crc kubenswrapper[4954]: I1128 16:26:27.690869 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-nb867" event={"ID":"f616c2cc-42cf-47c5-88f7-608060ddcd9e","Type":"ContainerStarted","Data":"ebae2d9c6af2f21628182f41c27bc38241591a6940e451d5b3068e4ccf17a8f7"}
Nov 28 16:26:27 crc kubenswrapper[4954]: I1128 16:26:27.692052 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-pl6wr" event={"ID":"cd7512cb-cf15-46bc-a4ce-a530e16c305e","Type":"ContainerStarted","Data":"890e65e279b840b11d6a813eb28c438532484c1ccf8531d811e732f184932d17"}
Nov 28 16:26:27 crc kubenswrapper[4954]: I1128 16:26:27.692836 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-pl6wr"
Nov 28 16:26:27 crc kubenswrapper[4954]: I1128 16:26:27.711920 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-pl6wr" podStartSLOduration=3.711905341 podStartE2EDuration="3.711905341s" podCreationTimestamp="2025-11-28 16:26:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:26:27.710792637 +0000 UTC m=+941.102461178" watchObservedRunningTime="2025-11-28 16:26:27.711905341 +0000 UTC m=+941.103573882"
Nov 28 16:26:28 crc kubenswrapper[4954]: I1128 16:26:28.709893 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-nb867" event={"ID":"f616c2cc-42cf-47c5-88f7-608060ddcd9e","Type":"ContainerStarted","Data":"9c3316a5033973ae64e575645742d113c457beb4c986ec94689b1f7c4076969c"}
Nov 28 16:26:28 crc kubenswrapper[4954]: I1128 16:26:28.710194 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-nb867"
Nov 28 16:26:28 crc kubenswrapper[4954]: I1128 16:26:28.728975 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-nb867" podStartSLOduration=4.728956666 podStartE2EDuration="4.728956666s" podCreationTimestamp="2025-11-28 16:26:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:26:28.726465938 +0000 UTC m=+942.118134489" watchObservedRunningTime="2025-11-28 16:26:28.728956666 +0000 UTC m=+942.120625207"
Nov 28 16:26:32 crc kubenswrapper[4954]: I1128 16:26:32.480938 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:26:32 crc kubenswrapper[4954]: I1128 16:26:32.481335 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:26:34 crc kubenswrapper[4954]: I1128 16:26:34.751808 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559" event={"ID":"b5e787d2-d6a3-4e50-9a29-077809abbe6a","Type":"ContainerStarted","Data":"fc30db2ca1f467d2550b1373b878c319d641ebb9e229ec3e9388b03ea58f19ea"}
Nov 28 16:26:34 crc kubenswrapper[4954]: I1128 16:26:34.752222 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559"
Nov 28 16:26:34 crc kubenswrapper[4954]: I1128 16:26:34.753857 4954 generic.go:334] "Generic (PLEG): container finished" podID="bb5e50bb-a885-4ae5-a7a8-d9214274540e" containerID="a6d23aa2dff178050333e1821d88e88d406cd374f50685d03e4824b15b832881" exitCode=0
Nov 28 16:26:34 crc kubenswrapper[4954]: I1128 16:26:34.753910 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vxw8v" event={"ID":"bb5e50bb-a885-4ae5-a7a8-d9214274540e","Type":"ContainerDied","Data":"a6d23aa2dff178050333e1821d88e88d406cd374f50685d03e4824b15b832881"}
Nov 28 16:26:34 crc kubenswrapper[4954]: I1128 16:26:34.781516 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559" podStartSLOduration=1.881858493 podStartE2EDuration="10.781490059s" podCreationTimestamp="2025-11-28 16:26:24 +0000 UTC" firstStartedPulling="2025-11-28 16:26:25.03563912 +0000 UTC m=+938.427307661" lastFinishedPulling="2025-11-28 16:26:33.935270686 +0000 UTC m=+947.326939227" observedRunningTime="2025-11-28 16:26:34.77128192 +0000 UTC m=+948.162950491" watchObservedRunningTime="2025-11-28 16:26:34.781490059 +0000 UTC m=+948.173158640"
Nov 28 16:26:35 crc kubenswrapper[4954]: I1128 16:26:35.772744 4954 generic.go:334] "Generic (PLEG): container finished" podID="bb5e50bb-a885-4ae5-a7a8-d9214274540e" containerID="5fb52dfa1666b93acd22e202aa8113a84957525327a8ec9dfcb751cc80ca780f" exitCode=0
Nov 28 16:26:35 crc kubenswrapper[4954]: I1128 16:26:35.773178 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vxw8v" event={"ID":"bb5e50bb-a885-4ae5-a7a8-d9214274540e","Type":"ContainerDied","Data":"5fb52dfa1666b93acd22e202aa8113a84957525327a8ec9dfcb751cc80ca780f"}
Nov 28 16:26:36 crc kubenswrapper[4954]: I1128 16:26:36.781135 4954 generic.go:334] "Generic (PLEG): container finished" podID="bb5e50bb-a885-4ae5-a7a8-d9214274540e" containerID="f1653a974efef7f767dc0f83243c2dd6addecd2cc7f306fe513036bfe57a98d6" exitCode=0
Nov 28 16:26:36 crc kubenswrapper[4954]: I1128 16:26:36.781228 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vxw8v" event={"ID":"bb5e50bb-a885-4ae5-a7a8-d9214274540e","Type":"ContainerDied","Data":"f1653a974efef7f767dc0f83243c2dd6addecd2cc7f306fe513036bfe57a98d6"}
Nov 28 16:26:37 crc kubenswrapper[4954]: I1128 16:26:37.790886 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vxw8v" event={"ID":"bb5e50bb-a885-4ae5-a7a8-d9214274540e","Type":"ContainerStarted","Data":"1c5da1f1d540d7125a08a499e63bd79a9d2bf143e6597af84058cd5c2a778f45"}
Nov 28 16:26:38 crc kubenswrapper[4954]: I1128 16:26:38.801680 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vxw8v" event={"ID":"bb5e50bb-a885-4ae5-a7a8-d9214274540e","Type":"ContainerStarted","Data":"f3a1aa89736ea75bdb3e02b07664ec29b967770ffe778dc8b11c02535ae89840"}
Nov 28 16:26:39 crc kubenswrapper[4954]: I1128 16:26:39.838286 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vxw8v" event={"ID":"bb5e50bb-a885-4ae5-a7a8-d9214274540e","Type":"ContainerStarted","Data":"e01a44eb9cef57fb074f1661c1152a897762a8833ebf7c5e9ec7ca0bc286f698"}
Nov 28 16:26:39 crc kubenswrapper[4954]: I1128 16:26:39.838561 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vxw8v" event={"ID":"bb5e50bb-a885-4ae5-a7a8-d9214274540e","Type":"ContainerStarted","Data":"a63347dd4fb8a194ebb254bf55c42fdaec9519b59ad8a4702fdedab284b04c42"}
Nov 28 16:26:40 crc kubenswrapper[4954]: I1128 16:26:40.849776 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vxw8v" event={"ID":"bb5e50bb-a885-4ae5-a7a8-d9214274540e","Type":"ContainerStarted","Data":"e37f987ffc9e23de1fc519064ae7d9df536ab280293dd55ed5066472caf43f44"}
Nov 28 16:26:40 crc kubenswrapper[4954]: I1128 16:26:40.850116 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-vxw8v" event={"ID":"bb5e50bb-a885-4ae5-a7a8-d9214274540e","Type":"ContainerStarted","Data":"84897c2ec98380bee950c5295164b803de19691803395ed6d8862ab9a0edab63"}
Nov 28 16:26:40 crc kubenswrapper[4954]: I1128 16:26:40.850161 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-vxw8v"
Nov 28 16:26:40 crc kubenswrapper[4954]: I1128 16:26:40.876193 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-vxw8v" podStartSLOduration=8.466300019 podStartE2EDuration="16.876176079s" podCreationTimestamp="2025-11-28 16:26:24 +0000 UTC" firstStartedPulling="2025-11-28 16:26:25.546921639 +0000 UTC m=+938.938590220" lastFinishedPulling="2025-11-28 16:26:33.956797739 +0000 UTC m=+947.348466280" observedRunningTime="2025-11-28 16:26:40.870836232 +0000 UTC m=+954.262504773" watchObservedRunningTime="2025-11-28 16:26:40.876176079 +0000 UTC m=+954.267844620"
Nov 28 16:26:44 crc kubenswrapper[4954]: I1128 16:26:44.800661 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-7q559"
Nov 28 16:26:44 crc kubenswrapper[4954]: I1128 16:26:44.923919 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-pl6wr"
Nov 28 16:26:45 crc kubenswrapper[4954]: I1128 16:26:45.387374 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-vxw8v"
Nov 28 16:26:45 crc kubenswrapper[4954]: I1128 16:26:45.427959 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-vxw8v"
Nov 28 16:26:46 crc kubenswrapper[4954]: I1128 16:26:46.669543 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-nb867"
Nov 28 16:26:48 crc kubenswrapper[4954]: I1128 16:26:48.175615 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"]
Nov 28 16:26:48 crc kubenswrapper[4954]: I1128 16:26:48.178237 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"
Nov 28 16:26:48 crc kubenswrapper[4954]: I1128 16:26:48.180001 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 28 16:26:48 crc kubenswrapper[4954]: I1128 16:26:48.187723 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"]
Nov 28 16:26:48 crc kubenswrapper[4954]: I1128 16:26:48.344712 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6b04abe7-9fca-439a-809b-e6c5c09aa88b-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862\" (UID: \"6b04abe7-9fca-439a-809b-e6c5c09aa88b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"
Nov 28 16:26:48 crc kubenswrapper[4954]: I1128 16:26:48.344781 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6b04abe7-9fca-439a-809b-e6c5c09aa88b-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862\" (UID: \"6b04abe7-9fca-439a-809b-e6c5c09aa88b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"
Nov 28 16:26:48 crc kubenswrapper[4954]: I1128 16:26:48.344811 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4djz\" (UniqueName: \"kubernetes.io/projected/6b04abe7-9fca-439a-809b-e6c5c09aa88b-kube-api-access-c4djz\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862\" (UID: \"6b04abe7-9fca-439a-809b-e6c5c09aa88b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"
Nov 28 16:26:48 crc kubenswrapper[4954]: I1128 16:26:48.447340 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6b04abe7-9fca-439a-809b-e6c5c09aa88b-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862\" (UID: \"6b04abe7-9fca-439a-809b-e6c5c09aa88b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"
Nov 28 16:26:48 crc kubenswrapper[4954]: I1128 16:26:48.447848 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6b04abe7-9fca-439a-809b-e6c5c09aa88b-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862\" (UID: \"6b04abe7-9fca-439a-809b-e6c5c09aa88b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"
Nov 28 16:26:48 crc kubenswrapper[4954]: I1128 16:26:48.448064 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4djz\" (UniqueName: \"kubernetes.io/projected/6b04abe7-9fca-439a-809b-e6c5c09aa88b-kube-api-access-c4djz\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862\" (UID: \"6b04abe7-9fca-439a-809b-e6c5c09aa88b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"
Nov 28 16:26:48 crc kubenswrapper[4954]: I1128 16:26:48.448661 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6b04abe7-9fca-439a-809b-e6c5c09aa88b-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862\" (UID: \"6b04abe7-9fca-439a-809b-e6c5c09aa88b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"
Nov 28 16:26:48 crc kubenswrapper[4954]: I1128 16:26:48.448675 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6b04abe7-9fca-439a-809b-e6c5c09aa88b-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862\" (UID: \"6b04abe7-9fca-439a-809b-e6c5c09aa88b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"
Nov 28 16:26:48 crc kubenswrapper[4954]: I1128 16:26:48.472581 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4djz\" (UniqueName: \"kubernetes.io/projected/6b04abe7-9fca-439a-809b-e6c5c09aa88b-kube-api-access-c4djz\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862\" (UID: \"6b04abe7-9fca-439a-809b-e6c5c09aa88b\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"
Nov 28 16:26:48 crc kubenswrapper[4954]: I1128 16:26:48.549876 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"
Nov 28 16:26:48 crc kubenswrapper[4954]: I1128 16:26:48.959138 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"]
Nov 28 16:26:48 crc kubenswrapper[4954]: W1128 16:26:48.966745 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6b04abe7_9fca_439a_809b_e6c5c09aa88b.slice/crio-93c09024245534955b9860478d6baa480f592ff073b65d7726fe3bfc2c21b678 WatchSource:0}: Error finding container 93c09024245534955b9860478d6baa480f592ff073b65d7726fe3bfc2c21b678: Status 404 returned error can't find the container with id 93c09024245534955b9860478d6baa480f592ff073b65d7726fe3bfc2c21b678
Nov 28 16:26:49 crc kubenswrapper[4954]: I1128 16:26:49.902658 4954 generic.go:334] "Generic (PLEG): container finished" podID="6b04abe7-9fca-439a-809b-e6c5c09aa88b" containerID="0b278743cc96b5e21a141e1b668d7c927a768ee990a77787cd4be3680b3937be" exitCode=0
Nov 28 16:26:49 crc kubenswrapper[4954]: I1128 16:26:49.902700 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862" event={"ID":"6b04abe7-9fca-439a-809b-e6c5c09aa88b","Type":"ContainerDied","Data":"0b278743cc96b5e21a141e1b668d7c927a768ee990a77787cd4be3680b3937be"}
Nov 28 16:26:49 crc kubenswrapper[4954]: I1128 16:26:49.902742 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862" event={"ID":"6b04abe7-9fca-439a-809b-e6c5c09aa88b","Type":"ContainerStarted","Data":"93c09024245534955b9860478d6baa480f592ff073b65d7726fe3bfc2c21b678"}
Nov 28 16:26:53 crc kubenswrapper[4954]: I1128 16:26:53.931009 4954 generic.go:334] "Generic (PLEG): container finished" podID="6b04abe7-9fca-439a-809b-e6c5c09aa88b" containerID="3d38327e3b86ba94808bfa591ef6c0a515240a661bba5bb8be38509a524d12b7" exitCode=0
Nov 28 16:26:53 crc kubenswrapper[4954]: I1128 16:26:53.931052 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862" event={"ID":"6b04abe7-9fca-439a-809b-e6c5c09aa88b","Type":"ContainerDied","Data":"3d38327e3b86ba94808bfa591ef6c0a515240a661bba5bb8be38509a524d12b7"}
Nov 28 16:26:54 crc kubenswrapper[4954]: I1128 16:26:54.940550 4954 generic.go:334] "Generic (PLEG): container finished" podID="6b04abe7-9fca-439a-809b-e6c5c09aa88b" containerID="8ef6aaa6bc3859b60ee41f101701612f9c3b35e993c776f7256fb74955c54adc" exitCode=0
Nov 28 16:26:54 crc kubenswrapper[4954]: I1128 16:26:54.940626 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862" event={"ID":"6b04abe7-9fca-439a-809b-e6c5c09aa88b","Type":"ContainerDied","Data":"8ef6aaa6bc3859b60ee41f101701612f9c3b35e993c776f7256fb74955c54adc"}
Nov 28 16:26:55 crc kubenswrapper[4954]: I1128 16:26:55.388907 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-vxw8v"
Nov 28 16:26:56 crc kubenswrapper[4954]: I1128 16:26:56.168701 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"
Nov 28 16:26:56 crc kubenswrapper[4954]: I1128 16:26:56.363910 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6b04abe7-9fca-439a-809b-e6c5c09aa88b-bundle\") pod \"6b04abe7-9fca-439a-809b-e6c5c09aa88b\" (UID: \"6b04abe7-9fca-439a-809b-e6c5c09aa88b\") "
Nov 28 16:26:56 crc kubenswrapper[4954]: I1128 16:26:56.364006 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4djz\" (UniqueName: \"kubernetes.io/projected/6b04abe7-9fca-439a-809b-e6c5c09aa88b-kube-api-access-c4djz\") pod \"6b04abe7-9fca-439a-809b-e6c5c09aa88b\" (UID: \"6b04abe7-9fca-439a-809b-e6c5c09aa88b\") "
Nov 28 16:26:56 crc kubenswrapper[4954]: I1128 16:26:56.364059 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6b04abe7-9fca-439a-809b-e6c5c09aa88b-util\") pod \"6b04abe7-9fca-439a-809b-e6c5c09aa88b\" (UID: \"6b04abe7-9fca-439a-809b-e6c5c09aa88b\") "
Nov 28 16:26:56 crc kubenswrapper[4954]: I1128 16:26:56.365870 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b04abe7-9fca-439a-809b-e6c5c09aa88b-bundle" (OuterVolumeSpecName: "bundle") pod "6b04abe7-9fca-439a-809b-e6c5c09aa88b" (UID: "6b04abe7-9fca-439a-809b-e6c5c09aa88b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:26:56 crc kubenswrapper[4954]: I1128 16:26:56.373829 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b04abe7-9fca-439a-809b-e6c5c09aa88b-kube-api-access-c4djz" (OuterVolumeSpecName: "kube-api-access-c4djz") pod "6b04abe7-9fca-439a-809b-e6c5c09aa88b" (UID: "6b04abe7-9fca-439a-809b-e6c5c09aa88b"). InnerVolumeSpecName "kube-api-access-c4djz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:26:56 crc kubenswrapper[4954]: I1128 16:26:56.388049 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b04abe7-9fca-439a-809b-e6c5c09aa88b-util" (OuterVolumeSpecName: "util") pod "6b04abe7-9fca-439a-809b-e6c5c09aa88b" (UID: "6b04abe7-9fca-439a-809b-e6c5c09aa88b"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:26:56 crc kubenswrapper[4954]: I1128 16:26:56.465636 4954 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6b04abe7-9fca-439a-809b-e6c5c09aa88b-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:26:56 crc kubenswrapper[4954]: I1128 16:26:56.465678 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4djz\" (UniqueName: \"kubernetes.io/projected/6b04abe7-9fca-439a-809b-e6c5c09aa88b-kube-api-access-c4djz\") on node \"crc\" DevicePath \"\""
Nov 28 16:26:56 crc kubenswrapper[4954]: I1128 16:26:56.465690 4954 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6b04abe7-9fca-439a-809b-e6c5c09aa88b-util\") on node \"crc\" DevicePath \"\""
Nov 28 16:26:56 crc kubenswrapper[4954]: I1128 16:26:56.964608 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862" event={"ID":"6b04abe7-9fca-439a-809b-e6c5c09aa88b","Type":"ContainerDied","Data":"93c09024245534955b9860478d6baa480f592ff073b65d7726fe3bfc2c21b678"}
Nov 28 16:26:56 crc kubenswrapper[4954]: I1128 16:26:56.964654 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="93c09024245534955b9860478d6baa480f592ff073b65d7726fe3bfc2c21b678"
Nov 28 16:26:56 crc kubenswrapper[4954]: I1128 16:26:56.964739 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862"
Nov 28 16:27:00 crc kubenswrapper[4954]: I1128 16:27:00.987090 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jpqzv"]
Nov 28 16:27:00 crc kubenswrapper[4954]: E1128 16:27:00.987621 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b04abe7-9fca-439a-809b-e6c5c09aa88b" containerName="extract"
Nov 28 16:27:00 crc kubenswrapper[4954]: I1128 16:27:00.987633 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b04abe7-9fca-439a-809b-e6c5c09aa88b" containerName="extract"
Nov 28 16:27:00 crc kubenswrapper[4954]: E1128 16:27:00.987647 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b04abe7-9fca-439a-809b-e6c5c09aa88b" containerName="pull"
Nov 28 16:27:00 crc kubenswrapper[4954]: I1128 16:27:00.987653 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b04abe7-9fca-439a-809b-e6c5c09aa88b" containerName="pull"
Nov 28 16:27:00 crc kubenswrapper[4954]: E1128 16:27:00.987675 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b04abe7-9fca-439a-809b-e6c5c09aa88b" containerName="util"
Nov 28 16:27:00 crc kubenswrapper[4954]: I1128 16:27:00.987680 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b04abe7-9fca-439a-809b-e6c5c09aa88b" containerName="util"
Nov 28 16:27:00 crc kubenswrapper[4954]: I1128 16:27:00.987777 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b04abe7-9fca-439a-809b-e6c5c09aa88b" containerName="extract"
Nov 28 16:27:00 crc kubenswrapper[4954]: I1128 16:27:00.988144 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jpqzv"
Nov 28 16:27:00 crc kubenswrapper[4954]: I1128 16:27:00.990912 4954 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-m2jst"
Nov 28 16:27:00 crc kubenswrapper[4954]: I1128 16:27:00.991540 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt"
Nov 28 16:27:00 crc kubenswrapper[4954]: I1128 16:27:00.993017 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt"
Nov 28 16:27:01 crc kubenswrapper[4954]: I1128 16:27:01.060923 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jpqzv"]
Nov 28 16:27:01 crc kubenswrapper[4954]: I1128 16:27:01.065217 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nc9th\" (UniqueName: \"kubernetes.io/projected/227a93ef-a614-4d22-bce9-84ec1dbadfc9-kube-api-access-nc9th\") pod \"cert-manager-operator-controller-manager-64cf6dff88-jpqzv\" (UID: \"227a93ef-a614-4d22-bce9-84ec1dbadfc9\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jpqzv"
Nov 28 16:27:01 crc kubenswrapper[4954]: I1128 16:27:01.065283 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/227a93ef-a614-4d22-bce9-84ec1dbadfc9-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-jpqzv\" (UID: \"227a93ef-a614-4d22-bce9-84ec1dbadfc9\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jpqzv"
Nov 28 16:27:01 crc kubenswrapper[4954]: I1128 16:27:01.166374 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nc9th\" (UniqueName: \"kubernetes.io/projected/227a93ef-a614-4d22-bce9-84ec1dbadfc9-kube-api-access-nc9th\") pod \"cert-manager-operator-controller-manager-64cf6dff88-jpqzv\" (UID: \"227a93ef-a614-4d22-bce9-84ec1dbadfc9\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jpqzv"
Nov 28 16:27:01 crc kubenswrapper[4954]: I1128 16:27:01.166815 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/227a93ef-a614-4d22-bce9-84ec1dbadfc9-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-jpqzv\" (UID: \"227a93ef-a614-4d22-bce9-84ec1dbadfc9\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jpqzv"
Nov 28 16:27:01 crc kubenswrapper[4954]: I1128 16:27:01.167358 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/227a93ef-a614-4d22-bce9-84ec1dbadfc9-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-jpqzv\" (UID: \"227a93ef-a614-4d22-bce9-84ec1dbadfc9\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jpqzv"
Nov 28 16:27:01 crc kubenswrapper[4954]: I1128 16:27:01.192566 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nc9th\" (UniqueName: \"kubernetes.io/projected/227a93ef-a614-4d22-bce9-84ec1dbadfc9-kube-api-access-nc9th\") pod \"cert-manager-operator-controller-manager-64cf6dff88-jpqzv\" (UID: \"227a93ef-a614-4d22-bce9-84ec1dbadfc9\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jpqzv"
Nov 28 16:27:01 crc kubenswrapper[4954]: I1128 16:27:01.310464 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jpqzv"
Nov 28 16:27:01 crc kubenswrapper[4954]: I1128 16:27:01.604100 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jpqzv"]
Nov 28 16:27:01 crc kubenswrapper[4954]: W1128 16:27:01.613174 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod227a93ef_a614_4d22_bce9_84ec1dbadfc9.slice/crio-83e846aef003f033ece1e43cccc0a77eced48f75b5df7a06186727c9eefbbc97 WatchSource:0}: Error finding container 83e846aef003f033ece1e43cccc0a77eced48f75b5df7a06186727c9eefbbc97: Status 404 returned error can't find the container with id 83e846aef003f033ece1e43cccc0a77eced48f75b5df7a06186727c9eefbbc97
Nov 28 16:27:01 crc kubenswrapper[4954]: I1128 16:27:01.993207 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jpqzv" event={"ID":"227a93ef-a614-4d22-bce9-84ec1dbadfc9","Type":"ContainerStarted","Data":"83e846aef003f033ece1e43cccc0a77eced48f75b5df7a06186727c9eefbbc97"}
Nov 28 16:27:02 crc kubenswrapper[4954]: I1128 16:27:02.481452 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:27:02 crc kubenswrapper[4954]: I1128 16:27:02.481512 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:27:02 crc kubenswrapper[4954]: I1128 16:27:02.481569 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj"
Nov 28 16:27:02 crc kubenswrapper[4954]: I1128 16:27:02.482121 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"81405dd859c5dce906875516afa2e66fb81aa5bf95b3268c18658c9aa3d313f9"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 16:27:02 crc kubenswrapper[4954]: I1128 16:27:02.482178 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://81405dd859c5dce906875516afa2e66fb81aa5bf95b3268c18658c9aa3d313f9" gracePeriod=600
Nov 28 16:27:07 crc kubenswrapper[4954]: I1128 16:27:07.040166 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="81405dd859c5dce906875516afa2e66fb81aa5bf95b3268c18658c9aa3d313f9" exitCode=0
Nov 28 16:27:07 crc kubenswrapper[4954]: I1128 16:27:07.040247 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"81405dd859c5dce906875516afa2e66fb81aa5bf95b3268c18658c9aa3d313f9"}
Nov 28 16:27:07 crc kubenswrapper[4954]: I1128 16:27:07.040868 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"01dcf9beb4ee4352c09c120b16a87c14a68604f09bcc07e4fe16753903887aac"}
Nov 28 16:27:07 crc kubenswrapper[4954]: I1128 16:27:07.040893 4954 scope.go:117] "RemoveContainer" containerID="10fedf83578e5a7a1a89aad8135351fe55542f6bf779f4911dd9e5f625bbbf3b"
Nov 28 16:27:15 crc kubenswrapper[4954]: I1128 16:27:15.102708 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jpqzv" event={"ID":"227a93ef-a614-4d22-bce9-84ec1dbadfc9","Type":"ContainerStarted","Data":"ef88108a28c75edde4929a4ed64384e4b2308144d1b60b9009abde35b95d1d2f"}
Nov 28 16:27:15 crc kubenswrapper[4954]: I1128 16:27:15.130420 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jpqzv" podStartSLOduration=2.2952743079999998 podStartE2EDuration="15.130402002s" podCreationTimestamp="2025-11-28 16:27:00 +0000 UTC" firstStartedPulling="2025-11-28 16:27:01.615728928 +0000 UTC m=+975.007397469" lastFinishedPulling="2025-11-28 16:27:14.450856622 +0000 UTC m=+987.842525163" observedRunningTime="2025-11-28 16:27:15.126824201 +0000 UTC m=+988.518492742" watchObservedRunningTime="2025-11-28 16:27:15.130402002 +0000 UTC m=+988.522070543"
Nov 28 16:27:17 crc kubenswrapper[4954]: I1128 16:27:17.494682 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-mlb76"]
Nov 28 16:27:17 crc kubenswrapper[4954]: I1128 16:27:17.495596 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-mlb76"
Nov 28 16:27:17 crc kubenswrapper[4954]: I1128 16:27:17.501742 4954 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-8n544"
Nov 28 16:27:17 crc kubenswrapper[4954]: I1128 16:27:17.502124 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Nov 28 16:27:17 crc kubenswrapper[4954]: I1128 16:27:17.502468 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2e56d709-e547-46e4-9bb5-f8c92f1742ce-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-mlb76\" (UID: \"2e56d709-e547-46e4-9bb5-f8c92f1742ce\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-mlb76"
Nov 28 16:27:17 crc kubenswrapper[4954]: I1128 16:27:17.502633 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qx9ws\" (UniqueName: \"kubernetes.io/projected/2e56d709-e547-46e4-9bb5-f8c92f1742ce-kube-api-access-qx9ws\") pod \"cert-manager-webhook-f4fb5df64-mlb76\" (UID: \"2e56d709-e547-46e4-9bb5-f8c92f1742ce\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-mlb76"
Nov 28 16:27:17 crc kubenswrapper[4954]: I1128 16:27:17.503375 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Nov 28 16:27:17 crc kubenswrapper[4954]: I1128 16:27:17.507987 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-mlb76"]
Nov 28 16:27:17 crc kubenswrapper[4954]: I1128 16:27:17.603571 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qx9ws\" (UniqueName: \"kubernetes.io/projected/2e56d709-e547-46e4-9bb5-f8c92f1742ce-kube-api-access-qx9ws\") pod \"cert-manager-webhook-f4fb5df64-mlb76\" (UID: \"2e56d709-e547-46e4-9bb5-f8c92f1742ce\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-mlb76"
Nov 28 16:27:17 crc kubenswrapper[4954]: I1128 16:27:17.603623 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2e56d709-e547-46e4-9bb5-f8c92f1742ce-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-mlb76\" (UID: \"2e56d709-e547-46e4-9bb5-f8c92f1742ce\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-mlb76"
Nov 28 16:27:17 crc kubenswrapper[4954]: I1128 16:27:17.630996 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qx9ws\" (UniqueName: \"kubernetes.io/projected/2e56d709-e547-46e4-9bb5-f8c92f1742ce-kube-api-access-qx9ws\") pod \"cert-manager-webhook-f4fb5df64-mlb76\" (UID: \"2e56d709-e547-46e4-9bb5-f8c92f1742ce\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-mlb76"
Nov 28 16:27:17 crc kubenswrapper[4954]: I1128 16:27:17.634936 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2e56d709-e547-46e4-9bb5-f8c92f1742ce-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-mlb76\" (UID: \"2e56d709-e547-46e4-9bb5-f8c92f1742ce\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-mlb76"
Nov 28 16:27:17 crc kubenswrapper[4954]: I1128 16:27:17.809546 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-mlb76"
Nov 28 16:27:18 crc kubenswrapper[4954]: I1128 16:27:18.077200 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-mlb76"]
Nov 28 16:27:18 crc kubenswrapper[4954]: I1128 16:27:18.120252 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-mlb76" event={"ID":"2e56d709-e547-46e4-9bb5-f8c92f1742ce","Type":"ContainerStarted","Data":"06b07b6c008524ec7fd1650c0e3d93f1e02862d529a3473e9d1d0dda80081164"}
Nov 28 16:27:19 crc kubenswrapper[4954]: I1128 16:27:19.384007 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-96289"]
Nov 28 16:27:19 crc kubenswrapper[4954]: I1128 16:27:19.385247 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-96289"
Nov 28 16:27:19 crc kubenswrapper[4954]: I1128 16:27:19.392135 4954 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-cs8gs"
Nov 28 16:27:19 crc kubenswrapper[4954]: I1128 16:27:19.398253 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-96289"]
Nov 28 16:27:19 crc kubenswrapper[4954]: I1128 16:27:19.426803 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8c016281-4e3e-4410-81e7-ede4c7f72535-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-96289\" (UID: \"8c016281-4e3e-4410-81e7-ede4c7f72535\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-96289"
Nov 28 16:27:19 crc kubenswrapper[4954]: I1128 16:27:19.426913 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmc94\" (UniqueName: \"kubernetes.io/projected/8c016281-4e3e-4410-81e7-ede4c7f72535-kube-api-access-pmc94\") pod \"cert-manager-cainjector-855d9ccff4-96289\" (UID: \"8c016281-4e3e-4410-81e7-ede4c7f72535\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-96289"
Nov 28 16:27:19 crc kubenswrapper[4954]: I1128 16:27:19.528385 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmc94\" (UniqueName: \"kubernetes.io/projected/8c016281-4e3e-4410-81e7-ede4c7f72535-kube-api-access-pmc94\") pod \"cert-manager-cainjector-855d9ccff4-96289\" (UID: \"8c016281-4e3e-4410-81e7-ede4c7f72535\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-96289"
Nov 28 16:27:19 crc kubenswrapper[4954]: I1128 16:27:19.528479 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8c016281-4e3e-4410-81e7-ede4c7f72535-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-96289\" (UID: \"8c016281-4e3e-4410-81e7-ede4c7f72535\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-96289"
Nov 28 16:27:19 crc kubenswrapper[4954]: I1128 16:27:19.545898 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8c016281-4e3e-4410-81e7-ede4c7f72535-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-96289\" (UID: \"8c016281-4e3e-4410-81e7-ede4c7f72535\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-96289"
Nov 28 16:27:19 crc kubenswrapper[4954]: I1128 16:27:19.546208 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmc94\" (UniqueName: \"kubernetes.io/projected/8c016281-4e3e-4410-81e7-ede4c7f72535-kube-api-access-pmc94\") pod \"cert-manager-cainjector-855d9ccff4-96289\" (UID: \"8c016281-4e3e-4410-81e7-ede4c7f72535\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-96289"
Nov 28 16:27:19 crc kubenswrapper[4954]: I1128 16:27:19.709774 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-96289"
Nov 28 16:27:20 crc kubenswrapper[4954]: I1128 16:27:20.211891 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-96289"]
Nov 28 16:27:21 crc kubenswrapper[4954]: I1128 16:27:21.160018 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-96289" event={"ID":"8c016281-4e3e-4410-81e7-ede4c7f72535","Type":"ContainerStarted","Data":"61aca46ff7d52e28778d6ea1b18cbcbbdf8ac59e6655116fdb3920c8eda7c141"}
Nov 28 16:27:35 crc kubenswrapper[4954]: E1128 16:27:35.578340 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cert-manager/jetstack-cert-manager-rhel9@sha256:29a0fa1c2f2a6cee62a0468a3883d16d491b4af29130dad6e3e2bb2948f274df"
Nov 28 16:27:35 crc kubenswrapper[4954]: E1128 16:27:35.579906 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cert-manager-webhook,Image:registry.redhat.io/cert-manager/jetstack-cert-manager-rhel9@sha256:29a0fa1c2f2a6cee62a0468a3883d16d491b4af29130dad6e3e2bb2948f274df,Command:[/app/cmd/webhook/webhook],Args:[--dynamic-serving-ca-secret-name=cert-manager-webhook-ca --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE) --dynamic-serving-dns-names=cert-manager-webhook,cert-manager-webhook.$(POD_NAMESPACE),cert-manager-webhook.$(POD_NAMESPACE).svc --secure-port=10250 --v=2],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:10250,Protocol:TCP,HostIP:,},ContainerPort{Name:healthcheck,HostPort:0,ContainerPort:6080,Protocol:TCP,HostIP:,},ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:9402,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:bound-sa-token,ReadOnly:true,MountPath:/var/run/secrets/openshift/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qx9ws,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{1 0 healthcheck},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:60,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{1 0 healthcheck},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000710000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cert-manager-webhook-f4fb5df64-mlb76_cert-manager(2e56d709-e547-46e4-9bb5-f8c92f1742ce): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 28 16:27:35 crc kubenswrapper[4954]: E1128 16:27:35.581691 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cert-manager-webhook\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="cert-manager/cert-manager-webhook-f4fb5df64-mlb76" podUID="2e56d709-e547-46e4-9bb5-f8c92f1742ce"
Nov 28 16:27:36 crc kubenswrapper[4954]: I1128 16:27:36.610178 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-rzs6f"]
Nov 28 16:27:36 crc kubenswrapper[4954]: I1128 16:27:36.610923 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-rzs6f"
Nov 28 16:27:36 crc kubenswrapper[4954]: I1128 16:27:36.614035 4954 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-lf5mx"
Nov 28 16:27:36 crc kubenswrapper[4954]: I1128 16:27:36.633597 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-rzs6f"]
Nov 28 16:27:36 crc kubenswrapper[4954]: I1128 16:27:36.705252 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7j26g\" (UniqueName: \"kubernetes.io/projected/b480d999-11d1-495e-8220-7c149204d009-kube-api-access-7j26g\") pod \"cert-manager-86cb77c54b-rzs6f\" (UID: \"b480d999-11d1-495e-8220-7c149204d009\") " pod="cert-manager/cert-manager-86cb77c54b-rzs6f"
Nov 28 16:27:36 crc kubenswrapper[4954]: I1128 16:27:36.705371 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b480d999-11d1-495e-8220-7c149204d009-bound-sa-token\") pod \"cert-manager-86cb77c54b-rzs6f\" (UID: \"b480d999-11d1-495e-8220-7c149204d009\") " pod="cert-manager/cert-manager-86cb77c54b-rzs6f"
Nov 28 16:27:36 crc kubenswrapper[4954]: I1128 16:27:36.806674 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b480d999-11d1-495e-8220-7c149204d009-bound-sa-token\") pod \"cert-manager-86cb77c54b-rzs6f\" (UID: \"b480d999-11d1-495e-8220-7c149204d009\") " pod="cert-manager/cert-manager-86cb77c54b-rzs6f"
Nov 28 16:27:36 crc kubenswrapper[4954]: I1128 16:27:36.806725 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7j26g\" (UniqueName: \"kubernetes.io/projected/b480d999-11d1-495e-8220-7c149204d009-kube-api-access-7j26g\") pod \"cert-manager-86cb77c54b-rzs6f\" (UID: \"b480d999-11d1-495e-8220-7c149204d009\") " pod="cert-manager/cert-manager-86cb77c54b-rzs6f"
Nov 28 16:27:36 crc kubenswrapper[4954]: I1128 16:27:36.826517 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b480d999-11d1-495e-8220-7c149204d009-bound-sa-token\") pod \"cert-manager-86cb77c54b-rzs6f\" (UID: \"b480d999-11d1-495e-8220-7c149204d009\") " pod="cert-manager/cert-manager-86cb77c54b-rzs6f"
Nov 28 16:27:36 crc kubenswrapper[4954]: I1128 16:27:36.826950 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7j26g\" (UniqueName: \"kubernetes.io/projected/b480d999-11d1-495e-8220-7c149204d009-kube-api-access-7j26g\") pod \"cert-manager-86cb77c54b-rzs6f\" (UID: \"b480d999-11d1-495e-8220-7c149204d009\") " pod="cert-manager/cert-manager-86cb77c54b-rzs6f"
Nov 28 16:27:36 crc kubenswrapper[4954]: I1128 16:27:36.958150 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-rzs6f"
Nov 28 16:27:37 crc kubenswrapper[4954]: I1128 16:27:37.330965 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-96289" event={"ID":"8c016281-4e3e-4410-81e7-ede4c7f72535","Type":"ContainerStarted","Data":"665cfaf3bcf5905dffe52d62ce2de7376c833bc6e65c1fa314b45e7549cf967d"}
Nov 28 16:27:37 crc kubenswrapper[4954]: I1128 16:27:37.333367 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-mlb76" event={"ID":"2e56d709-e547-46e4-9bb5-f8c92f1742ce","Type":"ContainerStarted","Data":"7b23754eb92c7152151902eafc916834ef97520dab2b894758dcfe5908c49b09"}
Nov 28 16:27:37 crc kubenswrapper[4954]: I1128 16:27:37.333623 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-mlb76"
Nov 28 16:27:37 crc kubenswrapper[4954]: I1128 16:27:37.383939 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-96289" podStartSLOduration=1.9125381799999999 podStartE2EDuration="18.383921987s" podCreationTimestamp="2025-11-28 16:27:19 +0000 UTC" firstStartedPulling="2025-11-28 16:27:20.236201449 +0000 UTC m=+993.627869990" lastFinishedPulling="2025-11-28 16:27:36.707585256 +0000 UTC m=+1010.099253797" observedRunningTime="2025-11-28 16:27:37.356638743 +0000 UTC m=+1010.748307324" watchObservedRunningTime="2025-11-28 16:27:37.383921987 +0000 UTC m=+1010.775590538"
Nov 28 16:27:37 crc kubenswrapper[4954]: I1128 16:27:37.387023 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-mlb76" podStartSLOduration=-9223372016.467762 podStartE2EDuration="20.387014502s" podCreationTimestamp="2025-11-28 16:27:17 +0000 UTC" firstStartedPulling="2025-11-28 16:27:18.082058905 +0000 UTC m=+991.473727446" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:27:37.381173031 +0000 UTC m=+1010.772841582" watchObservedRunningTime="2025-11-28 16:27:37.387014502 +0000 UTC m=+1010.778683053"
Nov 28 16:27:37 crc kubenswrapper[4954]: I1128 16:27:37.398092 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-rzs6f"]
Nov 28 16:27:38 crc kubenswrapper[4954]: I1128 16:27:38.341778 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-rzs6f" event={"ID":"b480d999-11d1-495e-8220-7c149204d009","Type":"ContainerStarted","Data":"b6e5052ee6229250780aae2132c063069ddd22cad4df4b387967cbf2af7ef8fd"}
Nov 28 16:27:38 crc kubenswrapper[4954]: I1128 16:27:38.342152 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-rzs6f" event={"ID":"b480d999-11d1-495e-8220-7c149204d009","Type":"ContainerStarted","Data":"2baa830eb46b5b99669fda33c895076652a7da95dacee82810d81fdee4ef900d"}
Nov 28 16:27:38 crc kubenswrapper[4954]: I1128 16:27:38.364725 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-rzs6f" podStartSLOduration=2.364704556 podStartE2EDuration="2.364704556s" podCreationTimestamp="2025-11-28 16:27:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:27:38.363819609 +0000 UTC m=+1011.755488150" watchObservedRunningTime="2025-11-28 16:27:38.364704556 +0000 UTC m=+1011.756373097"
Nov 28 16:27:42 crc kubenswrapper[4954]: I1128 16:27:42.814867 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-mlb76"
Nov 28 16:27:46 crc kubenswrapper[4954]: I1128 16:27:46.264626 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-v5768"]
Nov 28 16:27:46 crc kubenswrapper[4954]: I1128 16:27:46.268225 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-v5768"
Nov 28 16:27:46 crc kubenswrapper[4954]: I1128 16:27:46.271179 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-v5768"]
Nov 28 16:27:46 crc kubenswrapper[4954]: I1128 16:27:46.284316 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-7gb48"
Nov 28 16:27:46 crc kubenswrapper[4954]: I1128 16:27:46.284482 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 28 16:27:46 crc kubenswrapper[4954]: I1128 16:27:46.284675 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 28 16:27:46 crc kubenswrapper[4954]: I1128 16:27:46.428682 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rhdm\" (UniqueName: \"kubernetes.io/projected/882886fc-6463-4729-a868-6e3680af2e4e-kube-api-access-9rhdm\") pod \"openstack-operator-index-v5768\" (UID: \"882886fc-6463-4729-a868-6e3680af2e4e\") " pod="openstack-operators/openstack-operator-index-v5768"
Nov 28 16:27:46 crc kubenswrapper[4954]: I1128 16:27:46.530569 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rhdm\" (UniqueName: \"kubernetes.io/projected/882886fc-6463-4729-a868-6e3680af2e4e-kube-api-access-9rhdm\") pod \"openstack-operator-index-v5768\" (UID: \"882886fc-6463-4729-a868-6e3680af2e4e\") " pod="openstack-operators/openstack-operator-index-v5768"
Nov 28 16:27:46 crc kubenswrapper[4954]: I1128 16:27:46.550257 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rhdm\" (UniqueName: \"kubernetes.io/projected/882886fc-6463-4729-a868-6e3680af2e4e-kube-api-access-9rhdm\") pod \"openstack-operator-index-v5768\" (UID: \"882886fc-6463-4729-a868-6e3680af2e4e\") " pod="openstack-operators/openstack-operator-index-v5768"
Nov 28 16:27:46 crc kubenswrapper[4954]: I1128 16:27:46.593905 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-v5768"
Nov 28 16:27:46 crc kubenswrapper[4954]: I1128 16:27:46.915692 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-v5768"]
Nov 28 16:27:46 crc kubenswrapper[4954]: W1128 16:27:46.924821 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod882886fc_6463_4729_a868_6e3680af2e4e.slice/crio-2923c6ea23f05307b541042a20ab76c8fb8613fa266998a7ef2c2c97bc081516 WatchSource:0}: Error finding container 2923c6ea23f05307b541042a20ab76c8fb8613fa266998a7ef2c2c97bc081516: Status 404 returned error can't find the container with id 2923c6ea23f05307b541042a20ab76c8fb8613fa266998a7ef2c2c97bc081516
Nov 28 16:27:47 crc kubenswrapper[4954]: I1128 16:27:47.404180 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-v5768" event={"ID":"882886fc-6463-4729-a868-6e3680af2e4e","Type":"ContainerStarted","Data":"2923c6ea23f05307b541042a20ab76c8fb8613fa266998a7ef2c2c97bc081516"}
Nov 28 16:27:49 crc kubenswrapper[4954]: I1128 16:27:49.046928 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-v5768"]
Nov 28 16:27:49 crc kubenswrapper[4954]: I1128 16:27:49.638543 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-xw2zf"]
Nov 28 16:27:49 crc kubenswrapper[4954]: I1128 16:27:49.639256 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-xw2zf"
Nov 28 16:27:49 crc kubenswrapper[4954]: I1128 16:27:49.648845 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-xw2zf"]
Nov 28 16:27:49 crc kubenswrapper[4954]: I1128 16:27:49.780195 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flrds\" (UniqueName: \"kubernetes.io/projected/01a1f124-447b-410d-9a6b-ebff06524b31-kube-api-access-flrds\") pod \"openstack-operator-index-xw2zf\" (UID: \"01a1f124-447b-410d-9a6b-ebff06524b31\") " pod="openstack-operators/openstack-operator-index-xw2zf"
Nov 28 16:27:49 crc kubenswrapper[4954]: I1128 16:27:49.880975 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flrds\" (UniqueName: \"kubernetes.io/projected/01a1f124-447b-410d-9a6b-ebff06524b31-kube-api-access-flrds\") pod \"openstack-operator-index-xw2zf\" (UID: \"01a1f124-447b-410d-9a6b-ebff06524b31\") " pod="openstack-operators/openstack-operator-index-xw2zf"
Nov 28 16:27:49 crc kubenswrapper[4954]: I1128 16:27:49.899491 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flrds\" (UniqueName: \"kubernetes.io/projected/01a1f124-447b-410d-9a6b-ebff06524b31-kube-api-access-flrds\") pod \"openstack-operator-index-xw2zf\" (UID: \"01a1f124-447b-410d-9a6b-ebff06524b31\") " pod="openstack-operators/openstack-operator-index-xw2zf"
Nov 28 16:27:49 crc kubenswrapper[4954]: I1128 16:27:49.953310 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-xw2zf"
Nov 28 16:27:50 crc kubenswrapper[4954]: I1128 16:27:50.709565 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-xw2zf"]
Nov 28 16:27:51 crc kubenswrapper[4954]: W1128 16:27:51.545563 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod01a1f124_447b_410d_9a6b_ebff06524b31.slice/crio-104e1106d76ec3757edb68a383e866141db3c759da18df91b9a87b8e7eda45f5 WatchSource:0}: Error finding container 104e1106d76ec3757edb68a383e866141db3c759da18df91b9a87b8e7eda45f5: Status 404 returned error can't find the container with id 104e1106d76ec3757edb68a383e866141db3c759da18df91b9a87b8e7eda45f5
Nov 28 16:27:52 crc kubenswrapper[4954]: I1128 16:27:52.435756 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-xw2zf" event={"ID":"01a1f124-447b-410d-9a6b-ebff06524b31","Type":"ContainerStarted","Data":"104e1106d76ec3757edb68a383e866141db3c759da18df91b9a87b8e7eda45f5"}
Nov 28 16:27:58 crc kubenswrapper[4954]: I1128 16:27:58.472193 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-v5768" event={"ID":"882886fc-6463-4729-a868-6e3680af2e4e","Type":"ContainerStarted","Data":"72de7d1a738244baec468825ebe05ff5e86e922ab8dba273a4549223b423e155"}
Nov 28 16:27:58 crc kubenswrapper[4954]: I1128 16:27:58.472306 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-v5768" podUID="882886fc-6463-4729-a868-6e3680af2e4e" containerName="registry-server" containerID="cri-o://72de7d1a738244baec468825ebe05ff5e86e922ab8dba273a4549223b423e155" gracePeriod=2
Nov 28 16:27:58 crc kubenswrapper[4954]: I1128 16:27:58.475396 4954 kubelet.go:2453] "SyncLoop
(PLEG): event for pod" pod="openstack-operators/openstack-operator-index-xw2zf" event={"ID":"01a1f124-447b-410d-9a6b-ebff06524b31","Type":"ContainerStarted","Data":"500aabe315eb0a25263a6961a2641a92fbd00169d6fefe84d319d94bb5851925"} Nov 28 16:27:58 crc kubenswrapper[4954]: I1128 16:27:58.495330 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-v5768" podStartSLOduration=1.643785749 podStartE2EDuration="12.495307642s" podCreationTimestamp="2025-11-28 16:27:46 +0000 UTC" firstStartedPulling="2025-11-28 16:27:46.927149968 +0000 UTC m=+1020.318818509" lastFinishedPulling="2025-11-28 16:27:57.778671831 +0000 UTC m=+1031.170340402" observedRunningTime="2025-11-28 16:27:58.489689787 +0000 UTC m=+1031.881358348" watchObservedRunningTime="2025-11-28 16:27:58.495307642 +0000 UTC m=+1031.886976193" Nov 28 16:27:58 crc kubenswrapper[4954]: I1128 16:27:58.510848 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-xw2zf" podStartSLOduration=3.280922769 podStartE2EDuration="9.510827167s" podCreationTimestamp="2025-11-28 16:27:49 +0000 UTC" firstStartedPulling="2025-11-28 16:27:51.5493255 +0000 UTC m=+1024.940994061" lastFinishedPulling="2025-11-28 16:27:57.779229918 +0000 UTC m=+1031.170898459" observedRunningTime="2025-11-28 16:27:58.509794825 +0000 UTC m=+1031.901463376" watchObservedRunningTime="2025-11-28 16:27:58.510827167 +0000 UTC m=+1031.902495718" Nov 28 16:27:59 crc kubenswrapper[4954]: I1128 16:27:59.488355 4954 generic.go:334] "Generic (PLEG): container finished" podID="882886fc-6463-4729-a868-6e3680af2e4e" containerID="72de7d1a738244baec468825ebe05ff5e86e922ab8dba273a4549223b423e155" exitCode=0 Nov 28 16:27:59 crc kubenswrapper[4954]: I1128 16:27:59.488466 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-v5768" event={"ID":"882886fc-6463-4729-a868-6e3680af2e4e","Type":"ContainerDied","Data":"72de7d1a738244baec468825ebe05ff5e86e922ab8dba273a4549223b423e155"} Nov 28 16:27:59 crc kubenswrapper[4954]: I1128 16:27:59.953609 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-xw2zf" Nov 28 16:27:59 crc kubenswrapper[4954]: I1128 16:27:59.953678 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-xw2zf" Nov 28 16:27:59 crc kubenswrapper[4954]: I1128 16:27:59.992845 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-xw2zf" Nov 28 16:28:03 crc kubenswrapper[4954]: I1128 16:28:03.940623 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-v5768" Nov 28 16:28:04 crc kubenswrapper[4954]: I1128 16:28:04.124911 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rhdm\" (UniqueName: \"kubernetes.io/projected/882886fc-6463-4729-a868-6e3680af2e4e-kube-api-access-9rhdm\") pod \"882886fc-6463-4729-a868-6e3680af2e4e\" (UID: \"882886fc-6463-4729-a868-6e3680af2e4e\") " Nov 28 16:28:04 crc kubenswrapper[4954]: I1128 16:28:04.133609 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/882886fc-6463-4729-a868-6e3680af2e4e-kube-api-access-9rhdm" (OuterVolumeSpecName: "kube-api-access-9rhdm") pod "882886fc-6463-4729-a868-6e3680af2e4e" (UID: "882886fc-6463-4729-a868-6e3680af2e4e"). InnerVolumeSpecName "kube-api-access-9rhdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:28:04 crc kubenswrapper[4954]: I1128 16:28:04.227048 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rhdm\" (UniqueName: \"kubernetes.io/projected/882886fc-6463-4729-a868-6e3680af2e4e-kube-api-access-9rhdm\") on node \"crc\" DevicePath \"\"" Nov 28 16:28:04 crc kubenswrapper[4954]: I1128 16:28:04.526901 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-v5768" event={"ID":"882886fc-6463-4729-a868-6e3680af2e4e","Type":"ContainerDied","Data":"2923c6ea23f05307b541042a20ab76c8fb8613fa266998a7ef2c2c97bc081516"} Nov 28 16:28:04 crc kubenswrapper[4954]: I1128 16:28:04.526951 4954 scope.go:117] "RemoveContainer" containerID="72de7d1a738244baec468825ebe05ff5e86e922ab8dba273a4549223b423e155" Nov 28 16:28:04 crc kubenswrapper[4954]: I1128 16:28:04.527018 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-v5768" Nov 28 16:28:04 crc kubenswrapper[4954]: I1128 16:28:04.571771 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-v5768"] Nov 28 16:28:04 crc kubenswrapper[4954]: I1128 16:28:04.577250 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-v5768"] Nov 28 16:28:05 crc kubenswrapper[4954]: I1128 16:28:05.865605 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="882886fc-6463-4729-a868-6e3680af2e4e" path="/var/lib/kubelet/pods/882886fc-6463-4729-a868-6e3680af2e4e/volumes" Nov 28 16:28:09 crc kubenswrapper[4954]: I1128 16:28:09.997640 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-xw2zf" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.156758 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc"] Nov 28 16:28:21 crc kubenswrapper[4954]: E1128 16:28:21.157930 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="882886fc-6463-4729-a868-6e3680af2e4e" containerName="registry-server" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.157960 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="882886fc-6463-4729-a868-6e3680af2e4e" containerName="registry-server" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.158322 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="882886fc-6463-4729-a868-6e3680af2e4e" containerName="registry-server" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.159992 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.163559 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-s568r" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.174567 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc"] Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.269495 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d76bc42a-993b-4748-a566-a5efb43bf20a-bundle\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc\" (UID: \"d76bc42a-993b-4748-a566-a5efb43bf20a\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.270173 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d76bc42a-993b-4748-a566-a5efb43bf20a-util\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc\" (UID: \"d76bc42a-993b-4748-a566-a5efb43bf20a\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.270340 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dm2dq\" (UniqueName: \"kubernetes.io/projected/d76bc42a-993b-4748-a566-a5efb43bf20a-kube-api-access-dm2dq\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc\" (UID: \"d76bc42a-993b-4748-a566-a5efb43bf20a\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.372772 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dm2dq\" (UniqueName: \"kubernetes.io/projected/d76bc42a-993b-4748-a566-a5efb43bf20a-kube-api-access-dm2dq\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc\" (UID: \"d76bc42a-993b-4748-a566-a5efb43bf20a\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.372944 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d76bc42a-993b-4748-a566-a5efb43bf20a-bundle\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc\" (UID: \"d76bc42a-993b-4748-a566-a5efb43bf20a\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.373002 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d76bc42a-993b-4748-a566-a5efb43bf20a-util\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc\" (UID: \"d76bc42a-993b-4748-a566-a5efb43bf20a\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.374094 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/d76bc42a-993b-4748-a566-a5efb43bf20a-bundle\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc\" (UID: \"d76bc42a-993b-4748-a566-a5efb43bf20a\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.374132 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d76bc42a-993b-4748-a566-a5efb43bf20a-util\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc\" (UID: \"d76bc42a-993b-4748-a566-a5efb43bf20a\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.409416 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dm2dq\" (UniqueName: \"kubernetes.io/projected/d76bc42a-993b-4748-a566-a5efb43bf20a-kube-api-access-dm2dq\") pod \"170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc\" (UID: \"d76bc42a-993b-4748-a566-a5efb43bf20a\") " pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.488646 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" Nov 28 16:28:21 crc kubenswrapper[4954]: I1128 16:28:21.742559 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc"] Nov 28 16:28:21 crc kubenswrapper[4954]: W1128 16:28:21.754125 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd76bc42a_993b_4748_a566_a5efb43bf20a.slice/crio-d4b2338d3b4c7f7cb5486cc3b81938e61767d39475a3d4b32df0d959d53e9bcb WatchSource:0}: Error finding container d4b2338d3b4c7f7cb5486cc3b81938e61767d39475a3d4b32df0d959d53e9bcb: Status 404 returned error can't find the container with id d4b2338d3b4c7f7cb5486cc3b81938e61767d39475a3d4b32df0d959d53e9bcb Nov 28 16:28:22 crc kubenswrapper[4954]: I1128 16:28:22.653441 4954 generic.go:334] "Generic (PLEG): container finished" podID="d76bc42a-993b-4748-a566-a5efb43bf20a" containerID="4671416bb3f94bb0c841eac7e8ae72510de5588b2cb1dfc7befe39a409ee572d" exitCode=0 Nov 28 16:28:22 crc kubenswrapper[4954]: I1128 16:28:22.653495 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" event={"ID":"d76bc42a-993b-4748-a566-a5efb43bf20a","Type":"ContainerDied","Data":"4671416bb3f94bb0c841eac7e8ae72510de5588b2cb1dfc7befe39a409ee572d"} Nov 28 16:28:22 crc kubenswrapper[4954]: I1128 16:28:22.653757 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" event={"ID":"d76bc42a-993b-4748-a566-a5efb43bf20a","Type":"ContainerStarted","Data":"d4b2338d3b4c7f7cb5486cc3b81938e61767d39475a3d4b32df0d959d53e9bcb"} Nov 28 16:28:24 crc kubenswrapper[4954]: I1128 16:28:24.671820 4954 generic.go:334] "Generic (PLEG): container finished" podID="d76bc42a-993b-4748-a566-a5efb43bf20a" containerID="38bd716bb5fc94c996cc6f8fead858dad3e1791a2d2d549547862d61833079bb" exitCode=0 Nov 28 16:28:24 crc kubenswrapper[4954]: I1128 16:28:24.671915 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" event={"ID":"d76bc42a-993b-4748-a566-a5efb43bf20a","Type":"ContainerDied","Data":"38bd716bb5fc94c996cc6f8fead858dad3e1791a2d2d549547862d61833079bb"} Nov 28 16:28:25 crc kubenswrapper[4954]: I1128 16:28:25.681509 4954 generic.go:334] "Generic (PLEG): container finished" podID="d76bc42a-993b-4748-a566-a5efb43bf20a" containerID="a7106e17415bee7fe9b875ade7a40c01ac8bb843eeed86b70c13c031152dc881" exitCode=0 Nov 28 16:28:25 crc kubenswrapper[4954]: I1128 16:28:25.681592 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" event={"ID":"d76bc42a-993b-4748-a566-a5efb43bf20a","Type":"ContainerDied","Data":"a7106e17415bee7fe9b875ade7a40c01ac8bb843eeed86b70c13c031152dc881"} Nov 28 16:28:26 crc kubenswrapper[4954]: I1128 16:28:26.971220 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" Nov 28 16:28:27 crc kubenswrapper[4954]: I1128 16:28:27.150788 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d76bc42a-993b-4748-a566-a5efb43bf20a-util\") pod \"d76bc42a-993b-4748-a566-a5efb43bf20a\" (UID: \"d76bc42a-993b-4748-a566-a5efb43bf20a\") " Nov 28 16:28:27 crc kubenswrapper[4954]: I1128 16:28:27.150884 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d76bc42a-993b-4748-a566-a5efb43bf20a-bundle\") pod \"d76bc42a-993b-4748-a566-a5efb43bf20a\" (UID: \"d76bc42a-993b-4748-a566-a5efb43bf20a\") " Nov 28 16:28:27 crc kubenswrapper[4954]: I1128 16:28:27.150920 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dm2dq\" (UniqueName: \"kubernetes.io/projected/d76bc42a-993b-4748-a566-a5efb43bf20a-kube-api-access-dm2dq\") pod \"d76bc42a-993b-4748-a566-a5efb43bf20a\" (UID: \"d76bc42a-993b-4748-a566-a5efb43bf20a\") " Nov 28 16:28:27 crc kubenswrapper[4954]: I1128 16:28:27.152008 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d76bc42a-993b-4748-a566-a5efb43bf20a-bundle" (OuterVolumeSpecName: "bundle") pod "d76bc42a-993b-4748-a566-a5efb43bf20a" (UID: "d76bc42a-993b-4748-a566-a5efb43bf20a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:28:27 crc kubenswrapper[4954]: I1128 16:28:27.160716 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d76bc42a-993b-4748-a566-a5efb43bf20a-kube-api-access-dm2dq" (OuterVolumeSpecName: "kube-api-access-dm2dq") pod "d76bc42a-993b-4748-a566-a5efb43bf20a" (UID: "d76bc42a-993b-4748-a566-a5efb43bf20a"). InnerVolumeSpecName "kube-api-access-dm2dq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:28:27 crc kubenswrapper[4954]: I1128 16:28:27.166479 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d76bc42a-993b-4748-a566-a5efb43bf20a-util" (OuterVolumeSpecName: "util") pod "d76bc42a-993b-4748-a566-a5efb43bf20a" (UID: "d76bc42a-993b-4748-a566-a5efb43bf20a"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:28:27 crc kubenswrapper[4954]: I1128 16:28:27.252798 4954 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d76bc42a-993b-4748-a566-a5efb43bf20a-util\") on node \"crc\" DevicePath \"\"" Nov 28 16:28:27 crc kubenswrapper[4954]: I1128 16:28:27.252849 4954 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d76bc42a-993b-4748-a566-a5efb43bf20a-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:28:27 crc kubenswrapper[4954]: I1128 16:28:27.252862 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dm2dq\" (UniqueName: \"kubernetes.io/projected/d76bc42a-993b-4748-a566-a5efb43bf20a-kube-api-access-dm2dq\") on node \"crc\" DevicePath \"\"" Nov 28 16:28:27 crc kubenswrapper[4954]: I1128 16:28:27.699347 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" event={"ID":"d76bc42a-993b-4748-a566-a5efb43bf20a","Type":"ContainerDied","Data":"d4b2338d3b4c7f7cb5486cc3b81938e61767d39475a3d4b32df0d959d53e9bcb"} Nov 28 16:28:27 crc kubenswrapper[4954]: I1128 16:28:27.699383 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc" Nov 28 16:28:27 crc kubenswrapper[4954]: I1128 16:28:27.699392 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d4b2338d3b4c7f7cb5486cc3b81938e61767d39475a3d4b32df0d959d53e9bcb" Nov 28 16:28:33 crc kubenswrapper[4954]: I1128 16:28:33.316400 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-nk2nv"] Nov 28 16:28:33 crc kubenswrapper[4954]: E1128 16:28:33.317268 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d76bc42a-993b-4748-a566-a5efb43bf20a" containerName="pull" Nov 28 16:28:33 crc kubenswrapper[4954]: I1128 16:28:33.317285 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d76bc42a-993b-4748-a566-a5efb43bf20a" containerName="pull" Nov 28 16:28:33 crc kubenswrapper[4954]: E1128 16:28:33.317295 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d76bc42a-993b-4748-a566-a5efb43bf20a" containerName="extract" Nov 28 16:28:33 crc kubenswrapper[4954]: I1128 16:28:33.317301 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d76bc42a-993b-4748-a566-a5efb43bf20a" containerName="extract" Nov 28 16:28:33 crc kubenswrapper[4954]: E1128 16:28:33.317320 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d76bc42a-993b-4748-a566-a5efb43bf20a" containerName="util" Nov 28 16:28:33 crc kubenswrapper[4954]: I1128 16:28:33.317328 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d76bc42a-993b-4748-a566-a5efb43bf20a" containerName="util" Nov 28 16:28:33 crc kubenswrapper[4954]: I1128 16:28:33.317459 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="d76bc42a-993b-4748-a566-a5efb43bf20a" containerName="extract" Nov 28 16:28:33 crc kubenswrapper[4954]: I1128 16:28:33.317955 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-nk2nv" Nov 28 16:28:33 crc kubenswrapper[4954]: I1128 16:28:33.329190 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-bh9kn" Nov 28 16:28:33 crc kubenswrapper[4954]: I1128 16:28:33.343454 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-nk2nv"] Nov 28 16:28:33 crc kubenswrapper[4954]: I1128 16:28:33.427648 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bh7m2\" (UniqueName: \"kubernetes.io/projected/472bd715-386a-403e-9885-56d830589114-kube-api-access-bh7m2\") pod \"openstack-operator-controller-operator-6fcddf5ccf-nk2nv\" (UID: \"472bd715-386a-403e-9885-56d830589114\") " pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-nk2nv" Nov 28 16:28:33 crc kubenswrapper[4954]: I1128 16:28:33.528793 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bh7m2\" (UniqueName: \"kubernetes.io/projected/472bd715-386a-403e-9885-56d830589114-kube-api-access-bh7m2\") pod \"openstack-operator-controller-operator-6fcddf5ccf-nk2nv\" (UID: \"472bd715-386a-403e-9885-56d830589114\") " pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-nk2nv" Nov 28 16:28:33 crc kubenswrapper[4954]: I1128 16:28:33.548788 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bh7m2\" (UniqueName: \"kubernetes.io/projected/472bd715-386a-403e-9885-56d830589114-kube-api-access-bh7m2\") pod \"openstack-operator-controller-operator-6fcddf5ccf-nk2nv\" (UID: \"472bd715-386a-403e-9885-56d830589114\") " pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-nk2nv" Nov 28 16:28:33 crc kubenswrapper[4954]: I1128 16:28:33.635913 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-nk2nv" Nov 28 16:28:33 crc kubenswrapper[4954]: I1128 16:28:33.880247 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-nk2nv"] Nov 28 16:28:34 crc kubenswrapper[4954]: I1128 16:28:34.750232 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-nk2nv" event={"ID":"472bd715-386a-403e-9885-56d830589114","Type":"ContainerStarted","Data":"4edcd9ae922e01d267a3bfb0e58cc4a8e0321a0c269a92ef5c5d46ba71e53a17"} Nov 28 16:28:39 crc kubenswrapper[4954]: I1128 16:28:39.784854 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-nk2nv" event={"ID":"472bd715-386a-403e-9885-56d830589114","Type":"ContainerStarted","Data":"b354c121bc55c8c022de677d969368eed289784651f4b370b27de3e14affb97f"} Nov 28 16:28:40 crc kubenswrapper[4954]: I1128 16:28:40.790051 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-nk2nv" Nov 28 16:28:40 crc kubenswrapper[4954]: I1128 16:28:40.823400 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-nk2nv" podStartSLOduration=2.084822195 podStartE2EDuration="7.823380904s" podCreationTimestamp="2025-11-28 16:28:33 +0000 UTC" firstStartedPulling="2025-11-28 16:28:33.872671317 +0000 UTC m=+1067.264339858" lastFinishedPulling="2025-11-28 16:28:39.611230026 +0000 UTC m=+1073.002898567" observedRunningTime="2025-11-28 16:28:40.821163095 +0000 UTC m=+1074.212831646" watchObservedRunningTime="2025-11-28 16:28:40.823380904 +0000 UTC m=+1074.215049445" Nov 28 16:28:53 crc kubenswrapper[4954]: I1128 16:28:53.639946 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-6fcddf5ccf-nk2nv" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.314955 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-vd2qv"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.316584 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-vd2qv" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.322422 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-vjsgv" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.326055 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.326962 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.332025 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-vd2qv"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.365700 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-tjrpc" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.371747 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.386638 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-frclp"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.388088 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-frclp" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.394305 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-b957w" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.394811 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-frclp"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.405105 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7n95\" (UniqueName: \"kubernetes.io/projected/22df58cc-9b57-40fd-a1fd-6dfb287daea1-kube-api-access-v7n95\") pod \"cinder-operator-controller-manager-859b6ccc6-dxqks\" (UID: \"22df58cc-9b57-40fd-a1fd-6dfb287daea1\") " pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.405178 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnwj4\" (UniqueName: \"kubernetes.io/projected/273ea62c-4545-4d8e-b831-ad40b8413f6d-kube-api-access-lnwj4\") pod \"barbican-operator-controller-manager-7d9dfd778-vd2qv\" (UID: \"273ea62c-4545-4d8e-b831-ad40b8413f6d\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-vd2qv" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.415163 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-668d9c48b9-jpsg7"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.416731 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-jpsg7" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.419842 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-6bzjr" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.423585 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-kvcbm"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.425115 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-kvcbm" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.427969 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-6fxdj" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.430115 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-668d9c48b9-jpsg7"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.439325 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-kvcbm"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.455167 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.456257 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.466432 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-qbjzp" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.466597 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.468835 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-xl989"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.469838 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-xl989" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.480514 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-fkdq5" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.480642 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-8jp7l"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.491416 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-8jp7l" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.496708 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-7pd5d" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.514517 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-285wz\" (UniqueName: \"kubernetes.io/projected/ac397e56-94ea-4618-8516-dba8864212ef-kube-api-access-285wz\") pod \"infra-operator-controller-manager-57548d458d-c6pr6\" (UID: \"ac397e56-94ea-4618-8516-dba8864212ef\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.514600 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqdmx\" (UniqueName: \"kubernetes.io/projected/1aff6577-6109-4234-8300-62afe871125b-kube-api-access-dqdmx\") pod \"heat-operator-controller-manager-5f64f6f8bb-kvcbm\" (UID: \"1aff6577-6109-4234-8300-62afe871125b\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-kvcbm" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.514758 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert\") pod \"infra-operator-controller-manager-57548d458d-c6pr6\" (UID: \"ac397e56-94ea-4618-8516-dba8864212ef\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.514846 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7n95\" (UniqueName: \"kubernetes.io/projected/22df58cc-9b57-40fd-a1fd-6dfb287daea1-kube-api-access-v7n95\") pod \"cinder-operator-controller-manager-859b6ccc6-dxqks\" (UID: \"22df58cc-9b57-40fd-a1fd-6dfb287daea1\") " pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.514930 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwdch\" (UniqueName: \"kubernetes.io/projected/8be7ca71-bdf4-42ca-8593-f5ad8ae1404c-kube-api-access-jwdch\") pod \"glance-operator-controller-manager-668d9c48b9-jpsg7\" (UID: \"8be7ca71-bdf4-42ca-8593-f5ad8ae1404c\") " pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-jpsg7" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.514993 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnwj4\" (UniqueName: \"kubernetes.io/projected/273ea62c-4545-4d8e-b831-ad40b8413f6d-kube-api-access-lnwj4\") pod \"barbican-operator-controller-manager-7d9dfd778-vd2qv\" (UID: \"273ea62c-4545-4d8e-b831-ad40b8413f6d\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-vd2qv" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.515047 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ftxs\" (UniqueName: \"kubernetes.io/projected/2a678943-ab1c-46a3-89d9-03cec5259ecc-kube-api-access-5ftxs\") pod \"designate-operator-controller-manager-78b4bc895b-frclp\" (UID: \"2a678943-ab1c-46a3-89d9-03cec5259ecc\") " 
pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-frclp" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.521048 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.552517 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-xl989"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.564662 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-8jp7l"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.576642 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnwj4\" (UniqueName: \"kubernetes.io/projected/273ea62c-4545-4d8e-b831-ad40b8413f6d-kube-api-access-lnwj4\") pod \"barbican-operator-controller-manager-7d9dfd778-vd2qv\" (UID: \"273ea62c-4545-4d8e-b831-ad40b8413f6d\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-vd2qv" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.586977 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7n95\" (UniqueName: \"kubernetes.io/projected/22df58cc-9b57-40fd-a1fd-6dfb287daea1-kube-api-access-v7n95\") pod \"cinder-operator-controller-manager-859b6ccc6-dxqks\" (UID: \"22df58cc-9b57-40fd-a1fd-6dfb287daea1\") " pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.612713 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.613971 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.615737 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-lz8s9" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.616551 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-285wz\" (UniqueName: \"kubernetes.io/projected/ac397e56-94ea-4618-8516-dba8864212ef-kube-api-access-285wz\") pod \"infra-operator-controller-manager-57548d458d-c6pr6\" (UID: \"ac397e56-94ea-4618-8516-dba8864212ef\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.616615 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t7nl\" (UniqueName: \"kubernetes.io/projected/65c96ed2-5a4a-4f4e-898d-3f3705d42250-kube-api-access-7t7nl\") pod \"ironic-operator-controller-manager-6c548fd776-8jp7l\" (UID: \"65c96ed2-5a4a-4f4e-898d-3f3705d42250\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-8jp7l" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.616650 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqdmx\" (UniqueName: \"kubernetes.io/projected/1aff6577-6109-4234-8300-62afe871125b-kube-api-access-dqdmx\") pod \"heat-operator-controller-manager-5f64f6f8bb-kvcbm\" (UID: \"1aff6577-6109-4234-8300-62afe871125b\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-kvcbm" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.616696 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert\") pod \"infra-operator-controller-manager-57548d458d-c6pr6\" (UID: \"ac397e56-94ea-4618-8516-dba8864212ef\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.616748 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7m64\" (UniqueName: \"kubernetes.io/projected/3c9ab6ab-bcb8-4129-bd15-83a08cf28f66-kube-api-access-s7m64\") pod \"horizon-operator-controller-manager-68c6d99b8f-xl989\" (UID: \"3c9ab6ab-bcb8-4129-bd15-83a08cf28f66\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-xl989" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.616776 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwdch\" (UniqueName: \"kubernetes.io/projected/8be7ca71-bdf4-42ca-8593-f5ad8ae1404c-kube-api-access-jwdch\") pod \"glance-operator-controller-manager-668d9c48b9-jpsg7\" (UID: \"8be7ca71-bdf4-42ca-8593-f5ad8ae1404c\") " pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-jpsg7" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.616802 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ftxs\" (UniqueName: \"kubernetes.io/projected/2a678943-ab1c-46a3-89d9-03cec5259ecc-kube-api-access-5ftxs\") pod \"designate-operator-controller-manager-78b4bc895b-frclp\" (UID: \"2a678943-ab1c-46a3-89d9-03cec5259ecc\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-frclp" Nov 28 16:29:12 crc 
kubenswrapper[4954]: E1128 16:29:12.617128 4954 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 16:29:12 crc kubenswrapper[4954]: E1128 16:29:12.617182 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert podName:ac397e56-94ea-4618-8516-dba8864212ef nodeName:}" failed. No retries permitted until 2025-11-28 16:29:13.117165144 +0000 UTC m=+1106.508833685 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert") pod "infra-operator-controller-manager-57548d458d-c6pr6" (UID: "ac397e56-94ea-4618-8516-dba8864212ef") : secret "infra-operator-webhook-server-cert" not found Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.625268 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.634506 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-6546668bfd-svnnf"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.636418 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-svnnf" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.641069 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-vgz5b" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.654969 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-6546668bfd-svnnf"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.665939 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.668503 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.676416 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-b4vxb" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.683519 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-vd2qv" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.688125 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwdch\" (UniqueName: \"kubernetes.io/projected/8be7ca71-bdf4-42ca-8593-f5ad8ae1404c-kube-api-access-jwdch\") pod \"glance-operator-controller-manager-668d9c48b9-jpsg7\" (UID: \"8be7ca71-bdf4-42ca-8593-f5ad8ae1404c\") " pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-jpsg7" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.689800 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqdmx\" (UniqueName: \"kubernetes.io/projected/1aff6577-6109-4234-8300-62afe871125b-kube-api-access-dqdmx\") pod \"heat-operator-controller-manager-5f64f6f8bb-kvcbm\" (UID: \"1aff6577-6109-4234-8300-62afe871125b\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-kvcbm" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.696136 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.697179 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.705827 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-stknf"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.709103 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-stknf" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.711817 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-285wz\" (UniqueName: \"kubernetes.io/projected/ac397e56-94ea-4618-8516-dba8864212ef-kube-api-access-285wz\") pod \"infra-operator-controller-manager-57548d458d-c6pr6\" (UID: \"ac397e56-94ea-4618-8516-dba8864212ef\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.714317 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-qjk94" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.716438 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-mbhfd"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.718080 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ftxs\" (UniqueName: \"kubernetes.io/projected/2a678943-ab1c-46a3-89d9-03cec5259ecc-kube-api-access-5ftxs\") pod \"designate-operator-controller-manager-78b4bc895b-frclp\" (UID: \"2a678943-ab1c-46a3-89d9-03cec5259ecc\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-frclp" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.719370 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwphb\" (UniqueName: \"kubernetes.io/projected/801feb18-7515-4b28-a364-23c2ad1d29a2-kube-api-access-mwphb\") pod \"keystone-operator-controller-manager-546d4bdf48-nkpj2\" (UID: \"801feb18-7515-4b28-a364-23c2ad1d29a2\") " 
pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.719463 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clmtv\" (UniqueName: \"kubernetes.io/projected/9689608c-a2b5-49cd-a758-14d120acd0f4-kube-api-access-clmtv\") pod \"mariadb-operator-controller-manager-56bbcc9d85-9smv8\" (UID: \"9689608c-a2b5-49cd-a758-14d120acd0f4\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.719494 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lsdsg\" (UniqueName: \"kubernetes.io/projected/1e466f0e-3bcc-4812-9939-90e852e54c6e-kube-api-access-lsdsg\") pod \"manila-operator-controller-manager-6546668bfd-svnnf\" (UID: \"1e466f0e-3bcc-4812-9939-90e852e54c6e\") " pod="openstack-operators/manila-operator-controller-manager-6546668bfd-svnnf" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.719548 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t7nl\" (UniqueName: \"kubernetes.io/projected/65c96ed2-5a4a-4f4e-898d-3f3705d42250-kube-api-access-7t7nl\") pod \"ironic-operator-controller-manager-6c548fd776-8jp7l\" (UID: \"65c96ed2-5a4a-4f4e-898d-3f3705d42250\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-8jp7l" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.719628 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7m64\" (UniqueName: \"kubernetes.io/projected/3c9ab6ab-bcb8-4129-bd15-83a08cf28f66-kube-api-access-s7m64\") pod \"horizon-operator-controller-manager-68c6d99b8f-xl989\" (UID: \"3c9ab6ab-bcb8-4129-bd15-83a08cf28f66\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-xl989" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.721348 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-mbhfd" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.722769 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-swzpq" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.742687 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-jpsg7" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.751592 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7t7nl\" (UniqueName: \"kubernetes.io/projected/65c96ed2-5a4a-4f4e-898d-3f3705d42250-kube-api-access-7t7nl\") pod \"ironic-operator-controller-manager-6c548fd776-8jp7l\" (UID: \"65c96ed2-5a4a-4f4e-898d-3f3705d42250\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-8jp7l" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.751962 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-stknf"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.752644 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-kvcbm" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.778810 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-mbhfd"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.790187 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7m64\" (UniqueName: \"kubernetes.io/projected/3c9ab6ab-bcb8-4129-bd15-83a08cf28f66-kube-api-access-s7m64\") pod \"horizon-operator-controller-manager-68c6d99b8f-xl989\" (UID: \"3c9ab6ab-bcb8-4129-bd15-83a08cf28f66\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-xl989" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.800268 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-jrqz6"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.802649 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-xl989" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.809101 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-jrqz6" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.821014 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77w7g\" (UniqueName: \"kubernetes.io/projected/b6a73982-7f81-4947-938d-331487f421e0-kube-api-access-77w7g\") pod \"nova-operator-controller-manager-697bc559fc-mbhfd\" (UID: \"b6a73982-7f81-4947-938d-331487f421e0\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-mbhfd" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.821082 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clmtv\" (UniqueName: \"kubernetes.io/projected/9689608c-a2b5-49cd-a758-14d120acd0f4-kube-api-access-clmtv\") pod \"mariadb-operator-controller-manager-56bbcc9d85-9smv8\" (UID: \"9689608c-a2b5-49cd-a758-14d120acd0f4\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.821109 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lsdsg\" (UniqueName: \"kubernetes.io/projected/1e466f0e-3bcc-4812-9939-90e852e54c6e-kube-api-access-lsdsg\") pod \"manila-operator-controller-manager-6546668bfd-svnnf\" (UID: \"1e466f0e-3bcc-4812-9939-90e852e54c6e\") " pod="openstack-operators/manila-operator-controller-manager-6546668bfd-svnnf" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.821206 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwphb\" (UniqueName: \"kubernetes.io/projected/801feb18-7515-4b28-a364-23c2ad1d29a2-kube-api-access-mwphb\") pod \"keystone-operator-controller-manager-546d4bdf48-nkpj2\" (UID: \"801feb18-7515-4b28-a364-23c2ad1d29a2\") " pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.821229 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zg9gk\" (UniqueName: \"kubernetes.io/projected/1bf94f9e-ea95-4f86-9cc6-3bc27093b601-kube-api-access-zg9gk\") pod 
\"neutron-operator-controller-manager-5fdfd5b6b5-stknf\" (UID: \"1bf94f9e-ea95-4f86-9cc6-3bc27093b601\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-stknf" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.824254 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-jrqz6"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.827875 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-cr9rs" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.828830 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.850039 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lsdsg\" (UniqueName: \"kubernetes.io/projected/1e466f0e-3bcc-4812-9939-90e852e54c6e-kube-api-access-lsdsg\") pod \"manila-operator-controller-manager-6546668bfd-svnnf\" (UID: \"1e466f0e-3bcc-4812-9939-90e852e54c6e\") " pod="openstack-operators/manila-operator-controller-manager-6546668bfd-svnnf" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.853489 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-8jp7l" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.860642 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-9vjc5"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.862223 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clmtv\" (UniqueName: \"kubernetes.io/projected/9689608c-a2b5-49cd-a758-14d120acd0f4-kube-api-access-clmtv\") pod \"mariadb-operator-controller-manager-56bbcc9d85-9smv8\" (UID: \"9689608c-a2b5-49cd-a758-14d120acd0f4\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.860858 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.865090 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwphb\" (UniqueName: \"kubernetes.io/projected/801feb18-7515-4b28-a364-23c2ad1d29a2-kube-api-access-mwphb\") pod \"keystone-operator-controller-manager-546d4bdf48-nkpj2\" (UID: \"801feb18-7515-4b28-a364-23c2ad1d29a2\") " pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.867750 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-9vjc5"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.871801 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-d72pj" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.872127 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.873926 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-9vjc5" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.878841 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-6wrcc" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.882915 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-8wvs2"] Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.888426 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-8wvs2" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.890593 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-6ldsr" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.922066 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5r5q2\" (UniqueName: \"kubernetes.io/projected/ba448a3d-8e7a-4d75-8853-8cf28954b48d-kube-api-access-5r5q2\") pod \"ovn-operator-controller-manager-b6456fdb6-9vjc5\" (UID: \"ba448a3d-8e7a-4d75-8853-8cf28954b48d\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-9vjc5" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.922141 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zg9gk\" (UniqueName: \"kubernetes.io/projected/1bf94f9e-ea95-4f86-9cc6-3bc27093b601-kube-api-access-zg9gk\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-stknf\" (UID: \"1bf94f9e-ea95-4f86-9cc6-3bc27093b601\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-stknf" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.922173 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wkqp\" (UniqueName: \"kubernetes.io/projected/1a866879-c10a-477b-8e98-322e057ea708-kube-api-access-4wkqp\") pod \"octavia-operator-controller-manager-998648c74-jrqz6\" (UID: \"1a866879-c10a-477b-8e98-322e057ea708\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-jrqz6" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.922275 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77w7g\" (UniqueName: \"kubernetes.io/projected/b6a73982-7f81-4947-938d-331487f421e0-kube-api-access-77w7g\") pod \"nova-operator-controller-manager-697bc559fc-mbhfd\" (UID: \"b6a73982-7f81-4947-938d-331487f421e0\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-mbhfd" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.922338 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4r279n\" (UID: \"b2643938-ed66-4bf8-b080-24b3f8149d15\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.922386 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbxkc\" (UniqueName: \"kubernetes.io/projected/b2643938-ed66-4bf8-b080-24b3f8149d15-kube-api-access-vbxkc\") pod 
\"openstack-baremetal-operator-controller-manager-64bc77cfd4r279n\" (UID: \"b2643938-ed66-4bf8-b080-24b3f8149d15\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.958034 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2" Nov 28 16:29:12 crc kubenswrapper[4954]: I1128 16:29:12.964138 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:12.992832 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77w7g\" (UniqueName: \"kubernetes.io/projected/b6a73982-7f81-4947-938d-331487f421e0-kube-api-access-77w7g\") pod \"nova-operator-controller-manager-697bc559fc-mbhfd\" (UID: \"b6a73982-7f81-4947-938d-331487f421e0\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-mbhfd" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.087499 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-frclp" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.100161 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.105349 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-svnnf" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.105920 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zg9gk\" (UniqueName: \"kubernetes.io/projected/1bf94f9e-ea95-4f86-9cc6-3bc27093b601-kube-api-access-zg9gk\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-stknf\" (UID: \"1bf94f9e-ea95-4f86-9cc6-3bc27093b601\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-stknf" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.125767 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5r5q2\" (UniqueName: \"kubernetes.io/projected/ba448a3d-8e7a-4d75-8853-8cf28954b48d-kube-api-access-5r5q2\") pod \"ovn-operator-controller-manager-b6456fdb6-9vjc5\" (UID: \"ba448a3d-8e7a-4d75-8853-8cf28954b48d\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-9vjc5" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.126250 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wkqp\" (UniqueName: \"kubernetes.io/projected/1a866879-c10a-477b-8e98-322e057ea708-kube-api-access-4wkqp\") pod \"octavia-operator-controller-manager-998648c74-jrqz6\" (UID: \"1a866879-c10a-477b-8e98-322e057ea708\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-jrqz6" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.126569 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4r279n\" (UID: \"b2643938-ed66-4bf8-b080-24b3f8149d15\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" Nov 28 
16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.126820 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert\") pod \"infra-operator-controller-manager-57548d458d-c6pr6\" (UID: \"ac397e56-94ea-4618-8516-dba8864212ef\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.126852 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbxkc\" (UniqueName: \"kubernetes.io/projected/b2643938-ed66-4bf8-b080-24b3f8149d15-kube-api-access-vbxkc\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4r279n\" (UID: \"b2643938-ed66-4bf8-b080-24b3f8149d15\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" Nov 28 16:29:13 crc kubenswrapper[4954]: E1128 16:29:13.126824 4954 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 16:29:13 crc kubenswrapper[4954]: E1128 16:29:13.127683 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert podName:b2643938-ed66-4bf8-b080-24b3f8149d15 nodeName:}" failed. No retries permitted until 2025-11-28 16:29:13.627666081 +0000 UTC m=+1107.019334612 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" (UID: "b2643938-ed66-4bf8-b080-24b3f8149d15") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 16:29:13 crc kubenswrapper[4954]: E1128 16:29:13.127830 4954 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 16:29:13 crc kubenswrapper[4954]: E1128 16:29:13.127909 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert podName:ac397e56-94ea-4618-8516-dba8864212ef nodeName:}" failed. No retries permitted until 2025-11-28 16:29:14.127874487 +0000 UTC m=+1107.519543028 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert") pod "infra-operator-controller-manager-57548d458d-c6pr6" (UID: "ac397e56-94ea-4618-8516-dba8864212ef") : secret "infra-operator-webhook-server-cert" not found Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.140974 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-8wvs2"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.149278 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.155250 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.156479 4954 util.go:30] "No sandbox for pod can be found. 
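The MountVolume.SetUp failures just above are not kubelet bugs: the "cert" volumes reference Secrets (infra-operator-webhook-server-cert, openstack-baremetal-operator-webhook-server-cert) that whatever issues certificates in this cluster has not created yet, so kubelet parks the operation and retries. A hedged client-go sketch of how one might confirm that from outside the node; it assumes a kubeconfig at the default path and is illustrative only.

package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The secret named in the MountVolume.SetUp failure above.
	_, err = cs.CoreV1().Secrets("openstack-operators").
		Get(context.TODO(), "infra-operator-webhook-server-cert", metav1.GetOptions{})
	if err != nil {
		fmt.Println("still missing:", err) // kubelet keeps retrying the mount meanwhile
		return
	}
	fmt.Println("secret exists; the pending mount should succeed on the next retry")
}

Once the secret appears, no operator action is needed: the retry visible in the log picks it up.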
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-mbhfd" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.163481 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbxkc\" (UniqueName: \"kubernetes.io/projected/b2643938-ed66-4bf8-b080-24b3f8149d15-kube-api-access-vbxkc\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4r279n\" (UID: \"b2643938-ed66-4bf8-b080-24b3f8149d15\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.173517 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5r5q2\" (UniqueName: \"kubernetes.io/projected/ba448a3d-8e7a-4d75-8853-8cf28954b48d-kube-api-access-5r5q2\") pod \"ovn-operator-controller-manager-b6456fdb6-9vjc5\" (UID: \"ba448a3d-8e7a-4d75-8853-8cf28954b48d\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-9vjc5" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.174219 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-sxjf2" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.185908 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wkqp\" (UniqueName: \"kubernetes.io/projected/1a866879-c10a-477b-8e98-322e057ea708-kube-api-access-4wkqp\") pod \"octavia-operator-controller-manager-998648c74-jrqz6\" (UID: \"1a866879-c10a-477b-8e98-322e057ea708\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-jrqz6" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.191942 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-jrqz6" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.208202 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-9vjc5" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.253804 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.280405 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqmcj\" (UniqueName: \"kubernetes.io/projected/2023394a-92c9-4f1e-8823-f0e33c9381b3-kube-api-access-wqmcj\") pod \"placement-operator-controller-manager-78f8948974-8wvs2\" (UID: \"2023394a-92c9-4f1e-8823-f0e33c9381b3\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-8wvs2" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.309799 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.313986 4954 util.go:30] "No sandbox for pod can be found. 
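Every kube-api-access-* volume whose SetUp succeeds above is a projected service-account volume: a bound token, the cluster CA bundle, and the namespace file, all surfaced at /var/run/secrets/kubernetes.io/serviceaccount (the MountPath that also appears verbatim in the container specs dumped later in this log). A small stdlib sketch of what a container sees there once the mount lands:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	base := "/var/run/secrets/kubernetes.io/serviceaccount"
	for _, name := range []string{"token", "ca.crt", "namespace"} {
		data, err := os.ReadFile(filepath.Join(base, name))
		if err != nil {
			fmt.Println(name, "not available:", err)
			continue
		}
		fmt.Printf("%s: %d bytes\n", name, len(data)) // avoid printing the token itself
	}
}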
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.321032 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-pmx64" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.346631 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.369241 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-7fjtc"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.370572 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7fjtc" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.374097 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-cgllv" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.380517 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-7fjtc"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.392993 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-stknf" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.397633 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqmcj\" (UniqueName: \"kubernetes.io/projected/2023394a-92c9-4f1e-8823-f0e33c9381b3-kube-api-access-wqmcj\") pod \"placement-operator-controller-manager-78f8948974-8wvs2\" (UID: \"2023394a-92c9-4f1e-8823-f0e33c9381b3\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-8wvs2" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.397829 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4n55\" (UniqueName: \"kubernetes.io/projected/a2541e1b-97ee-4c01-999e-1f62adba25b6-kube-api-access-m4n55\") pod \"swift-operator-controller-manager-5f8c65bbfc-wcz2l\" (UID: \"a2541e1b-97ee-4c01-999e-1f62adba25b6\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.421669 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqmcj\" (UniqueName: \"kubernetes.io/projected/2023394a-92c9-4f1e-8823-f0e33c9381b3-kube-api-access-wqmcj\") pod \"placement-operator-controller-manager-78f8948974-8wvs2\" (UID: \"2023394a-92c9-4f1e-8823-f0e33c9381b3\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-8wvs2" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.424860 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-769dc69bc-62lbb"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.427313 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-62lbb" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.436318 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-vmbkk" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.437610 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-769dc69bc-62lbb"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.457990 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.460173 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.463347 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.463908 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.464455 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-2khm2" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.465747 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.471859 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-784hq"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.473157 4954 util.go:30] "No sandbox for pod can be found. 
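The repeated "Caches populated for *v1.Secret" reflector lines above are kubelet warming its local, watch-based cache of the Secrets each new pod references (dockercfg pull secrets, webhook certs) before it tries to mount them. A sketch of the same pattern with a namespaced client-go informer; this is an analogy to illustrate the mechanism, not kubelet's actual secret manager, and it assumes a kubeconfig at the default path.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("",
		filepath.Join(os.Getenv("HOME"), ".kube", "config"))
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// Watch Secrets only in the namespace the pods above live in.
	factory := informers.NewSharedInformerFactoryWithOptions(
		cs, 10*time.Minute, informers.WithNamespace("openstack-operators"))
	inf := factory.Core().V1().Secrets().Informer()
	inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			fmt.Println("cached secret:", obj.(*corev1.Secret).Name)
		},
	})

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	cache.WaitForCacheSync(stop, inf.HasSynced) // the "Caches populated" moment
	select {}                                   // keep watching
}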
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-784hq" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.483782 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-d5ndp" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.489342 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-784hq"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.499268 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.499347 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgqfg\" (UniqueName: \"kubernetes.io/projected/d036c938-f2af-483f-b0a7-2a00101a5e7f-kube-api-access-sgqfg\") pod \"test-operator-controller-manager-5854674fcc-7fjtc\" (UID: \"d036c938-f2af-483f-b0a7-2a00101a5e7f\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-7fjtc" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.499377 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8m87m\" (UniqueName: \"kubernetes.io/projected/64e36e10-c08f-481d-9fc1-c08d7a54d72d-kube-api-access-8m87m\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.499412 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bjtp\" (UniqueName: \"kubernetes.io/projected/92c27f55-05f6-4359-8ec8-9fa067172b51-kube-api-access-9bjtp\") pod \"telemetry-operator-controller-manager-76cc84c6bb-vgcrb\" (UID: \"92c27f55-05f6-4359-8ec8-9fa067172b51\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.499438 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvhv5\" (UniqueName: \"kubernetes.io/projected/ac579e75-f040-4640-ac48-7760cbce72ed-kube-api-access-lvhv5\") pod \"watcher-operator-controller-manager-769dc69bc-62lbb\" (UID: \"ac579e75-f040-4640-ac48-7760cbce72ed\") " pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-62lbb" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.499539 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.499651 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4n55\" (UniqueName: 
\"kubernetes.io/projected/a2541e1b-97ee-4c01-999e-1f62adba25b6-kube-api-access-m4n55\") pod \"swift-operator-controller-manager-5f8c65bbfc-wcz2l\" (UID: \"a2541e1b-97ee-4c01-999e-1f62adba25b6\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.499694 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwhtf\" (UniqueName: \"kubernetes.io/projected/73b9fb56-c718-41db-a043-82defc274674-kube-api-access-wwhtf\") pod \"rabbitmq-cluster-operator-manager-668c99d594-784hq\" (UID: \"73b9fb56-c718-41db-a043-82defc274674\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-784hq" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.515667 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-8wvs2" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.537004 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4n55\" (UniqueName: \"kubernetes.io/projected/a2541e1b-97ee-4c01-999e-1f62adba25b6-kube-api-access-m4n55\") pod \"swift-operator-controller-manager-5f8c65bbfc-wcz2l\" (UID: \"a2541e1b-97ee-4c01-999e-1f62adba25b6\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.558390 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-668d9c48b9-jpsg7"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.600880 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgqfg\" (UniqueName: \"kubernetes.io/projected/d036c938-f2af-483f-b0a7-2a00101a5e7f-kube-api-access-sgqfg\") pod \"test-operator-controller-manager-5854674fcc-7fjtc\" (UID: \"d036c938-f2af-483f-b0a7-2a00101a5e7f\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-7fjtc" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.601047 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8m87m\" (UniqueName: \"kubernetes.io/projected/64e36e10-c08f-481d-9fc1-c08d7a54d72d-kube-api-access-8m87m\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.601082 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bjtp\" (UniqueName: \"kubernetes.io/projected/92c27f55-05f6-4359-8ec8-9fa067172b51-kube-api-access-9bjtp\") pod \"telemetry-operator-controller-manager-76cc84c6bb-vgcrb\" (UID: \"92c27f55-05f6-4359-8ec8-9fa067172b51\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.601291 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvhv5\" (UniqueName: \"kubernetes.io/projected/ac579e75-f040-4640-ac48-7760cbce72ed-kube-api-access-lvhv5\") pod \"watcher-operator-controller-manager-769dc69bc-62lbb\" (UID: \"ac579e75-f040-4640-ac48-7760cbce72ed\") " pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-62lbb" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.601325 4954 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.601405 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwhtf\" (UniqueName: \"kubernetes.io/projected/73b9fb56-c718-41db-a043-82defc274674-kube-api-access-wwhtf\") pod \"rabbitmq-cluster-operator-manager-668c99d594-784hq\" (UID: \"73b9fb56-c718-41db-a043-82defc274674\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-784hq" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.601939 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:13 crc kubenswrapper[4954]: E1128 16:29:13.602099 4954 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 16:29:13 crc kubenswrapper[4954]: E1128 16:29:13.602156 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs podName:64e36e10-c08f-481d-9fc1-c08d7a54d72d nodeName:}" failed. No retries permitted until 2025-11-28 16:29:14.102135809 +0000 UTC m=+1107.493804350 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs") pod "openstack-operator-controller-manager-78d5d44766-fx2tm" (UID: "64e36e10-c08f-481d-9fc1-c08d7a54d72d") : secret "webhook-server-cert" not found Nov 28 16:29:13 crc kubenswrapper[4954]: E1128 16:29:13.602406 4954 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 16:29:13 crc kubenswrapper[4954]: E1128 16:29:13.602440 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs podName:64e36e10-c08f-481d-9fc1-c08d7a54d72d nodeName:}" failed. No retries permitted until 2025-11-28 16:29:14.102427888 +0000 UTC m=+1107.494096429 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs") pod "openstack-operator-controller-manager-78d5d44766-fx2tm" (UID: "64e36e10-c08f-481d-9fc1-c08d7a54d72d") : secret "metrics-server-cert" not found Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.625601 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwhtf\" (UniqueName: \"kubernetes.io/projected/73b9fb56-c718-41db-a043-82defc274674-kube-api-access-wwhtf\") pod \"rabbitmq-cluster-operator-manager-668c99d594-784hq\" (UID: \"73b9fb56-c718-41db-a043-82defc274674\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-784hq" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.629559 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgqfg\" (UniqueName: \"kubernetes.io/projected/d036c938-f2af-483f-b0a7-2a00101a5e7f-kube-api-access-sgqfg\") pod \"test-operator-controller-manager-5854674fcc-7fjtc\" (UID: \"d036c938-f2af-483f-b0a7-2a00101a5e7f\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-7fjtc" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.633777 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bjtp\" (UniqueName: \"kubernetes.io/projected/92c27f55-05f6-4359-8ec8-9fa067172b51-kube-api-access-9bjtp\") pod \"telemetry-operator-controller-manager-76cc84c6bb-vgcrb\" (UID: \"92c27f55-05f6-4359-8ec8-9fa067172b51\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.634356 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvhv5\" (UniqueName: \"kubernetes.io/projected/ac579e75-f040-4640-ac48-7760cbce72ed-kube-api-access-lvhv5\") pod \"watcher-operator-controller-manager-769dc69bc-62lbb\" (UID: \"ac579e75-f040-4640-ac48-7760cbce72ed\") " pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-62lbb" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.638696 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-vd2qv"] Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.639452 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8m87m\" (UniqueName: \"kubernetes.io/projected/64e36e10-c08f-481d-9fc1-c08d7a54d72d-kube-api-access-8m87m\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.670216 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.703963 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4r279n\" (UID: \"b2643938-ed66-4bf8-b080-24b3f8149d15\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" Nov 28 16:29:13 crc kubenswrapper[4954]: E1128 16:29:13.704106 4954 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 16:29:13 crc kubenswrapper[4954]: E1128 16:29:13.704164 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert podName:b2643938-ed66-4bf8-b080-24b3f8149d15 nodeName:}" failed. No retries permitted until 2025-11-28 16:29:14.704141563 +0000 UTC m=+1108.095810104 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" (UID: "b2643938-ed66-4bf8-b080-24b3f8149d15") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.712494 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7fjtc" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.783792 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-62lbb" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.805429 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l" Nov 28 16:29:13 crc kubenswrapper[4954]: I1128 16:29:13.806228 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-784hq" Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.015409 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks"] Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.029935 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-kvcbm"] Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.036483 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-xl989"] Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.119304 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.119392 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.119617 4954 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.119681 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs podName:64e36e10-c08f-481d-9fc1-c08d7a54d72d nodeName:}" failed. No retries permitted until 2025-11-28 16:29:15.119663455 +0000 UTC m=+1108.511332006 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs") pod "openstack-operator-controller-manager-78d5d44766-fx2tm" (UID: "64e36e10-c08f-481d-9fc1-c08d7a54d72d") : secret "metrics-server-cert" not found Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.120049 4954 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.120083 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs podName:64e36e10-c08f-481d-9fc1-c08d7a54d72d nodeName:}" failed. No retries permitted until 2025-11-28 16:29:15.120073878 +0000 UTC m=+1108.511742419 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs") pod "openstack-operator-controller-manager-78d5d44766-fx2tm" (UID: "64e36e10-c08f-481d-9fc1-c08d7a54d72d") : secret "webhook-server-cert" not found Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.138588 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-8jp7l"] Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.142149 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-jpsg7" event={"ID":"8be7ca71-bdf4-42ca-8593-f5ad8ae1404c","Type":"ContainerStarted","Data":"0a0b923815a8a6f227f2187b76d91fb41f73a549819cc9891d6f7a2498ab5880"} Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.143289 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-kvcbm" event={"ID":"1aff6577-6109-4234-8300-62afe871125b","Type":"ContainerStarted","Data":"f5f605a167db956841d14ab19d9706958049800307fba747af2baf80873fca81"} Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.147709 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks" event={"ID":"22df58cc-9b57-40fd-a1fd-6dfb287daea1","Type":"ContainerStarted","Data":"1c4d2c4bab132348af50b86f113bac72b490c50a3502969ffd10c71b1c52d16b"} Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.159833 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-vd2qv" event={"ID":"273ea62c-4545-4d8e-b831-ad40b8413f6d","Type":"ContainerStarted","Data":"30d9ca5a34cf2024a6fbdcb280bea2f0149f5c16cedf58cf31d5d9133dbe6413"} Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.176054 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2"] Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.221973 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert\") pod \"infra-operator-controller-manager-57548d458d-c6pr6\" (UID: \"ac397e56-94ea-4618-8516-dba8864212ef\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.222177 4954 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.222235 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert podName:ac397e56-94ea-4618-8516-dba8864212ef nodeName:}" failed. No retries permitted until 2025-11-28 16:29:16.222216246 +0000 UTC m=+1109.613884787 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert") pod "infra-operator-controller-manager-57548d458d-c6pr6" (UID: "ac397e56-94ea-4618-8516-dba8864212ef") : secret "infra-operator-webhook-server-cert" not found Nov 28 16:29:14 crc kubenswrapper[4954]: W1128 16:29:14.276177 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod65c96ed2_5a4a_4f4e_898d_3f3705d42250.slice/crio-acf7ab24fb9a12c63965cff9514afb5171d3f4934a63e5a8de6653f57f6e30b4 WatchSource:0}: Error finding container acf7ab24fb9a12c63965cff9514afb5171d3f4934a63e5a8de6653f57f6e30b4: Status 404 returned error can't find the container with id acf7ab24fb9a12c63965cff9514afb5171d3f4934a63e5a8de6653f57f6e30b4 Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.357514 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-frclp"] Nov 28 16:29:14 crc kubenswrapper[4954]: W1128 16:29:14.394947 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2a678943_ab1c_46a3_89d9_03cec5259ecc.slice/crio-81bb06a8da37d82a529caff73ad02a8fc5e316567823f2b42710e833aa8b1f47 WatchSource:0}: Error finding container 81bb06a8da37d82a529caff73ad02a8fc5e316567823f2b42710e833aa8b1f47: Status 404 returned error can't find the container with id 81bb06a8da37d82a529caff73ad02a8fc5e316567823f2b42710e833aa8b1f47 Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.560300 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-jrqz6"] Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.570369 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-mbhfd"] Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.682293 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-stknf"] Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.706448 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-9vjc5"] Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.718848 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-6546668bfd-svnnf"] Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.732462 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4r279n\" (UID: \"b2643938-ed66-4bf8-b080-24b3f8149d15\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.732701 4954 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.732787 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert podName:b2643938-ed66-4bf8-b080-24b3f8149d15 nodeName:}" failed. 
No retries permitted until 2025-11-28 16:29:16.732769664 +0000 UTC m=+1110.124438205 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" (UID: "b2643938-ed66-4bf8-b080-24b3f8149d15") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.800330 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb"] Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.805857 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8"] Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.812549 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-8wvs2"] Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.817428 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-7fjtc"] Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.817916 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9bjtp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-vgcrb_openstack-operators(92c27f55-05f6-4359-8ec8-9fa067172b51): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.820071 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9bjtp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-vgcrb_openstack-operators(92c27f55-05f6-4359-8ec8-9fa067172b51): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.822814 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb" podUID="92c27f55-05f6-4359-8ec8-9fa067172b51" Nov 28 16:29:14 crc kubenswrapper[4954]: W1128 16:29:14.823479 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9689608c_a2b5_49cd_a758_14d120acd0f4.slice/crio-a25a185de206198946b0aea0369e6a0b67f89bf6c6acd29299bde773ab9ad433 WatchSource:0}: Error finding container a25a185de206198946b0aea0369e6a0b67f89bf6c6acd29299bde773ab9ad433: 
Status 404 returned error can't find the container with id a25a185de206198946b0aea0369e6a0b67f89bf6c6acd29299bde773ab9ad433 Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.826334 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:600ca007e493d3af0fcc2ebac92e8da5efd2afe812b62d7d3d4dd0115bdf05d7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-clmtv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-56bbcc9d85-9smv8_openstack-operators(9689608c-a2b5-49cd-a758-14d120acd0f4): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.828507 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-clmtv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-56bbcc9d85-9smv8_openstack-operators(9689608c-a2b5-49cd-a758-14d120acd0f4): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.830504 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8" podUID="9689608c-a2b5-49cd-a758-14d120acd0f4" Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.929128 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-784hq"] Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.935644 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-769dc69bc-62lbb"] Nov 28 16:29:14 crc kubenswrapper[4954]: I1128 16:29:14.940563 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l"] Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.951312 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wwhtf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-784hq_openstack-operators(73b9fb56-c718-41db-a043-82defc274674): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.952717 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-784hq" podUID="73b9fb56-c718-41db-a043-82defc274674" Nov 28 16:29:14 crc kubenswrapper[4954]: W1128 16:29:14.955168 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2541e1b_97ee_4c01_999e_1f62adba25b6.slice/crio-b0bea8b6687779e7b5f24e027ec68d19e417b7d4836a0492e05f04c19504837d WatchSource:0}: Error finding container b0bea8b6687779e7b5f24e027ec68d19e417b7d4836a0492e05f04c19504837d: Status 404 returned error can't find the container with id b0bea8b6687779e7b5f24e027ec68d19e417b7d4836a0492e05f04c19504837d Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.966640 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-m4n55,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5f8c65bbfc-wcz2l_openstack-operators(a2541e1b-97ee-4c01-999e-1f62adba25b6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.970071 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-m4n55,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5f8c65bbfc-wcz2l_openstack-operators(a2541e1b-97ee-4c01-999e-1f62adba25b6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 16:29:14 crc kubenswrapper[4954]: E1128 16:29:14.971286 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l" podUID="a2541e1b-97ee-4c01-999e-1f62adba25b6" Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.140388 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " 
pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.140483 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:15 crc kubenswrapper[4954]: E1128 16:29:15.140715 4954 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 16:29:15 crc kubenswrapper[4954]: E1128 16:29:15.140767 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs podName:64e36e10-c08f-481d-9fc1-c08d7a54d72d nodeName:}" failed. No retries permitted until 2025-11-28 16:29:17.140750369 +0000 UTC m=+1110.532418910 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs") pod "openstack-operator-controller-manager-78d5d44766-fx2tm" (UID: "64e36e10-c08f-481d-9fc1-c08d7a54d72d") : secret "metrics-server-cert" not found Nov 28 16:29:15 crc kubenswrapper[4954]: E1128 16:29:15.141880 4954 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 16:29:15 crc kubenswrapper[4954]: E1128 16:29:15.141960 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs podName:64e36e10-c08f-481d-9fc1-c08d7a54d72d nodeName:}" failed. No retries permitted until 2025-11-28 16:29:17.141947187 +0000 UTC m=+1110.533615728 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs") pod "openstack-operator-controller-manager-78d5d44766-fx2tm" (UID: "64e36e10-c08f-481d-9fc1-c08d7a54d72d") : secret "webhook-server-cert" not found Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.182595 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7fjtc" event={"ID":"d036c938-f2af-483f-b0a7-2a00101a5e7f","Type":"ContainerStarted","Data":"566c82c73c5726399ccdb71e27c10ae4aff662fdb69ae9918f884f87f5c06a4e"} Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.185850 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-8wvs2" event={"ID":"2023394a-92c9-4f1e-8823-f0e33c9381b3","Type":"ContainerStarted","Data":"c97f4ad613d944d7cb3f12e4b16c426c7ddd2c87aebf8d64a377bc8b38220e8b"} Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.188860 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-frclp" event={"ID":"2a678943-ab1c-46a3-89d9-03cec5259ecc","Type":"ContainerStarted","Data":"81bb06a8da37d82a529caff73ad02a8fc5e316567823f2b42710e833aa8b1f47"} Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.192474 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-xl989" event={"ID":"3c9ab6ab-bcb8-4129-bd15-83a08cf28f66","Type":"ContainerStarted","Data":"e0a1b87813ccb673fb746d1766f284b680425fdfaddc79b55787a572738de245"} Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.194935 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8" event={"ID":"9689608c-a2b5-49cd-a758-14d120acd0f4","Type":"ContainerStarted","Data":"a25a185de206198946b0aea0369e6a0b67f89bf6c6acd29299bde773ab9ad433"} Nov 28 16:29:15 crc kubenswrapper[4954]: E1128 16:29:15.214756 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:600ca007e493d3af0fcc2ebac92e8da5efd2afe812b62d7d3d4dd0115bdf05d7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8" podUID="9689608c-a2b5-49cd-a758-14d120acd0f4" Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.216697 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l" event={"ID":"a2541e1b-97ee-4c01-999e-1f62adba25b6","Type":"ContainerStarted","Data":"b0bea8b6687779e7b5f24e027ec68d19e417b7d4836a0492e05f04c19504837d"} Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.223666 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-mbhfd" event={"ID":"b6a73982-7f81-4947-938d-331487f421e0","Type":"ContainerStarted","Data":"caa85e0f690c0b4afb33ff77923430d242c4c77dc43a2b7f6084278d76cb65a5"} Nov 28 16:29:15 crc kubenswrapper[4954]: E1128 16:29:15.228134 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l" podUID="a2541e1b-97ee-4c01-999e-1f62adba25b6" Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.234684 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-8jp7l" event={"ID":"65c96ed2-5a4a-4f4e-898d-3f3705d42250","Type":"ContainerStarted","Data":"acf7ab24fb9a12c63965cff9514afb5171d3f4934a63e5a8de6653f57f6e30b4"} Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.237407 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-9vjc5" event={"ID":"ba448a3d-8e7a-4d75-8853-8cf28954b48d","Type":"ContainerStarted","Data":"1a3fed77dbbadee56427e4c09e9ef4487f182926507a0f6ca025849839d30d9a"} Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.238811 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-stknf" event={"ID":"1bf94f9e-ea95-4f86-9cc6-3bc27093b601","Type":"ContainerStarted","Data":"7a8e104a0a5944679d80f35fa67d9312feb1786dda5394e58334f36cc5042e76"} Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.241499 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb" event={"ID":"92c27f55-05f6-4359-8ec8-9fa067172b51","Type":"ContainerStarted","Data":"ed1ada2caa7db786054929b1baeedc9008bf05452082d38c9497d483f4f47690"} Nov 28 16:29:15 crc kubenswrapper[4954]: E1128 16:29:15.245673 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb" podUID="92c27f55-05f6-4359-8ec8-9fa067172b51" Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.245968 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-svnnf" event={"ID":"1e466f0e-3bcc-4812-9939-90e852e54c6e","Type":"ContainerStarted","Data":"7601ec257c96bdfc54b092837d857105e36d10aba78738210e0b3b514d9bd6b5"} Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.253349 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2" event={"ID":"801feb18-7515-4b28-a364-23c2ad1d29a2","Type":"ContainerStarted","Data":"9be217ddb70db225ba70e2f461a9a06d9c1220d33aa9b6e82d03737547b35dfb"} Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.255088 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-jrqz6" event={"ID":"1a866879-c10a-477b-8e98-322e057ea708","Type":"ContainerStarted","Data":"5db8846bc6f215263582411c1fd3bb21b9f509b92ecfa86220a10b1201519e44"} Nov 
28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.257052 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-784hq" event={"ID":"73b9fb56-c718-41db-a043-82defc274674","Type":"ContainerStarted","Data":"fbe4e3ee0865dbf455503447af4682c7f6d1498533a474892a86e2874363778a"} Nov 28 16:29:15 crc kubenswrapper[4954]: E1128 16:29:15.258395 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-784hq" podUID="73b9fb56-c718-41db-a043-82defc274674" Nov 28 16:29:15 crc kubenswrapper[4954]: I1128 16:29:15.258808 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-62lbb" event={"ID":"ac579e75-f040-4640-ac48-7760cbce72ed","Type":"ContainerStarted","Data":"8c2e2199b002f33c8b9d19db02349807e836eb05044aa83fea08846ac69dda6c"} Nov 28 16:29:16 crc kubenswrapper[4954]: I1128 16:29:16.256305 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert\") pod \"infra-operator-controller-manager-57548d458d-c6pr6\" (UID: \"ac397e56-94ea-4618-8516-dba8864212ef\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" Nov 28 16:29:16 crc kubenswrapper[4954]: E1128 16:29:16.256449 4954 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 16:29:16 crc kubenswrapper[4954]: E1128 16:29:16.256494 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert podName:ac397e56-94ea-4618-8516-dba8864212ef nodeName:}" failed. No retries permitted until 2025-11-28 16:29:20.256481017 +0000 UTC m=+1113.648149558 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert") pod "infra-operator-controller-manager-57548d458d-c6pr6" (UID: "ac397e56-94ea-4618-8516-dba8864212ef") : secret "infra-operator-webhook-server-cert" not found Nov 28 16:29:16 crc kubenswrapper[4954]: E1128 16:29:16.281369 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-784hq" podUID="73b9fb56-c718-41db-a043-82defc274674" Nov 28 16:29:16 crc kubenswrapper[4954]: E1128 16:29:16.295497 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l" podUID="a2541e1b-97ee-4c01-999e-1f62adba25b6" Nov 28 16:29:16 crc kubenswrapper[4954]: E1128 16:29:16.297779 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/mariadb-operator@sha256:600ca007e493d3af0fcc2ebac92e8da5efd2afe812b62d7d3d4dd0115bdf05d7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8" podUID="9689608c-a2b5-49cd-a758-14d120acd0f4" Nov 28 16:29:16 crc kubenswrapper[4954]: E1128 16:29:16.300958 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb" podUID="92c27f55-05f6-4359-8ec8-9fa067172b51" Nov 28 16:29:16 crc kubenswrapper[4954]: I1128 16:29:16.765453 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4r279n\" (UID: \"b2643938-ed66-4bf8-b080-24b3f8149d15\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" Nov 28 16:29:16 crc kubenswrapper[4954]: E1128 16:29:16.766277 4954 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 16:29:16 crc kubenswrapper[4954]: E1128 16:29:16.766350 4954 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert podName:b2643938-ed66-4bf8-b080-24b3f8149d15 nodeName:}" failed. No retries permitted until 2025-11-28 16:29:20.766329323 +0000 UTC m=+1114.157997874 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" (UID: "b2643938-ed66-4bf8-b080-24b3f8149d15") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 16:29:17 crc kubenswrapper[4954]: I1128 16:29:17.171389 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:17 crc kubenswrapper[4954]: I1128 16:29:17.171460 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:17 crc kubenswrapper[4954]: E1128 16:29:17.171603 4954 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 16:29:17 crc kubenswrapper[4954]: E1128 16:29:17.171659 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs podName:64e36e10-c08f-481d-9fc1-c08d7a54d72d nodeName:}" failed. No retries permitted until 2025-11-28 16:29:21.171643026 +0000 UTC m=+1114.563311567 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs") pod "openstack-operator-controller-manager-78d5d44766-fx2tm" (UID: "64e36e10-c08f-481d-9fc1-c08d7a54d72d") : secret "metrics-server-cert" not found Nov 28 16:29:17 crc kubenswrapper[4954]: E1128 16:29:17.172017 4954 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 16:29:17 crc kubenswrapper[4954]: E1128 16:29:17.172044 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs podName:64e36e10-c08f-481d-9fc1-c08d7a54d72d nodeName:}" failed. No retries permitted until 2025-11-28 16:29:21.172037757 +0000 UTC m=+1114.563706298 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs") pod "openstack-operator-controller-manager-78d5d44766-fx2tm" (UID: "64e36e10-c08f-481d-9fc1-c08d7a54d72d") : secret "webhook-server-cert" not found Nov 28 16:29:20 crc kubenswrapper[4954]: I1128 16:29:20.307673 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert\") pod \"infra-operator-controller-manager-57548d458d-c6pr6\" (UID: \"ac397e56-94ea-4618-8516-dba8864212ef\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" Nov 28 16:29:20 crc kubenswrapper[4954]: E1128 16:29:20.308244 4954 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 16:29:20 crc kubenswrapper[4954]: E1128 16:29:20.308318 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert podName:ac397e56-94ea-4618-8516-dba8864212ef nodeName:}" failed. No retries permitted until 2025-11-28 16:29:28.308289738 +0000 UTC m=+1121.699958299 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert") pod "infra-operator-controller-manager-57548d458d-c6pr6" (UID: "ac397e56-94ea-4618-8516-dba8864212ef") : secret "infra-operator-webhook-server-cert" not found Nov 28 16:29:20 crc kubenswrapper[4954]: I1128 16:29:20.819974 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4r279n\" (UID: \"b2643938-ed66-4bf8-b080-24b3f8149d15\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" Nov 28 16:29:20 crc kubenswrapper[4954]: E1128 16:29:20.820216 4954 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 16:29:20 crc kubenswrapper[4954]: E1128 16:29:20.820269 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert podName:b2643938-ed66-4bf8-b080-24b3f8149d15 nodeName:}" failed. No retries permitted until 2025-11-28 16:29:28.820254489 +0000 UTC m=+1122.211923030 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" (UID: "b2643938-ed66-4bf8-b080-24b3f8149d15") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 16:29:21 crc kubenswrapper[4954]: I1128 16:29:21.229089 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:21 crc kubenswrapper[4954]: I1128 16:29:21.229211 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:21 crc kubenswrapper[4954]: E1128 16:29:21.229248 4954 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 16:29:21 crc kubenswrapper[4954]: E1128 16:29:21.229303 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs podName:64e36e10-c08f-481d-9fc1-c08d7a54d72d nodeName:}" failed. No retries permitted until 2025-11-28 16:29:29.229287168 +0000 UTC m=+1122.620955709 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs") pod "openstack-operator-controller-manager-78d5d44766-fx2tm" (UID: "64e36e10-c08f-481d-9fc1-c08d7a54d72d") : secret "webhook-server-cert" not found Nov 28 16:29:21 crc kubenswrapper[4954]: E1128 16:29:21.229325 4954 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 16:29:21 crc kubenswrapper[4954]: E1128 16:29:21.229414 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs podName:64e36e10-c08f-481d-9fc1-c08d7a54d72d nodeName:}" failed. No retries permitted until 2025-11-28 16:29:29.229401862 +0000 UTC m=+1122.621070403 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs") pod "openstack-operator-controller-manager-78d5d44766-fx2tm" (UID: "64e36e10-c08f-481d-9fc1-c08d7a54d72d") : secret "metrics-server-cert" not found Nov 28 16:29:28 crc kubenswrapper[4954]: I1128 16:29:28.381279 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert\") pod \"infra-operator-controller-manager-57548d458d-c6pr6\" (UID: \"ac397e56-94ea-4618-8516-dba8864212ef\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" Nov 28 16:29:28 crc kubenswrapper[4954]: I1128 16:29:28.387343 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac397e56-94ea-4618-8516-dba8864212ef-cert\") pod \"infra-operator-controller-manager-57548d458d-c6pr6\" (UID: \"ac397e56-94ea-4618-8516-dba8864212ef\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" Nov 28 16:29:28 crc kubenswrapper[4954]: I1128 16:29:28.673385 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" Nov 28 16:29:28 crc kubenswrapper[4954]: I1128 16:29:28.858636 4954 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:29:28 crc kubenswrapper[4954]: I1128 16:29:28.890269 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4r279n\" (UID: \"b2643938-ed66-4bf8-b080-24b3f8149d15\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" Nov 28 16:29:28 crc kubenswrapper[4954]: I1128 16:29:28.899924 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2643938-ed66-4bf8-b080-24b3f8149d15-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd4r279n\" (UID: \"b2643938-ed66-4bf8-b080-24b3f8149d15\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" Nov 28 16:29:29 crc kubenswrapper[4954]: I1128 16:29:29.098022 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" Nov 28 16:29:29 crc kubenswrapper[4954]: I1128 16:29:29.297157 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:29 crc kubenswrapper[4954]: I1128 16:29:29.297234 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:29 crc kubenswrapper[4954]: E1128 16:29:29.297358 4954 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 16:29:29 crc kubenswrapper[4954]: E1128 16:29:29.297376 4954 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 16:29:29 crc kubenswrapper[4954]: E1128 16:29:29.297417 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs podName:64e36e10-c08f-481d-9fc1-c08d7a54d72d nodeName:}" failed. No retries permitted until 2025-11-28 16:29:45.297401776 +0000 UTC m=+1138.689070317 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs") pod "openstack-operator-controller-manager-78d5d44766-fx2tm" (UID: "64e36e10-c08f-481d-9fc1-c08d7a54d72d") : secret "metrics-server-cert" not found Nov 28 16:29:29 crc kubenswrapper[4954]: E1128 16:29:29.297447 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs podName:64e36e10-c08f-481d-9fc1-c08d7a54d72d nodeName:}" failed. No retries permitted until 2025-11-28 16:29:45.297426627 +0000 UTC m=+1138.689095208 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs") pod "openstack-operator-controller-manager-78d5d44766-fx2tm" (UID: "64e36e10-c08f-481d-9fc1-c08d7a54d72d") : secret "webhook-server-cert" not found Nov 28 16:29:32 crc kubenswrapper[4954]: I1128 16:29:32.480885 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:29:32 crc kubenswrapper[4954]: I1128 16:29:32.481323 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:29:33 crc kubenswrapper[4954]: E1128 16:29:33.336786 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:c4abfc148600dfa85915f3dc911d988ea2335f26cb6b8d749fe79bfe53e5e429" Nov 28 16:29:33 crc kubenswrapper[4954]: E1128 16:29:33.337240 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:c4abfc148600dfa85915f3dc911d988ea2335f26cb6b8d749fe79bfe53e5e429,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dqdmx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5f64f6f8bb-kvcbm_openstack-operators(1aff6577-6109-4234-8300-62afe871125b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:34 crc kubenswrapper[4954]: E1128 16:29:34.198015 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:ecf7be921850bdc04697ed1b332bab39ad2a64e4e45c2a445c04f9bae6ac61b5" Nov 28 16:29:34 crc kubenswrapper[4954]: E1128 16:29:34.198182 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:ecf7be921850bdc04697ed1b332bab39ad2a64e4e45c2a445c04f9bae6ac61b5,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lsdsg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-6546668bfd-svnnf_openstack-operators(1e466f0e-3bcc-4812-9939-90e852e54c6e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:35 crc kubenswrapper[4954]: E1128 16:29:35.060419 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:f6059a0fbf031d34dcf086d14ce8c0546caeaee23c5780e90b5037c5feee9fea" Nov 28 16:29:35 crc kubenswrapper[4954]: E1128 16:29:35.060660 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:f6059a0fbf031d34dcf086d14ce8c0546caeaee23c5780e90b5037c5feee9fea,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lnwj4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-7d9dfd778-vd2qv_openstack-operators(273ea62c-4545-4d8e-b831-ad40b8413f6d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:36 crc kubenswrapper[4954]: E1128 16:29:36.480412 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:9aa8c03633e4b934c57868c1660acf47e7d386ac86bcb344df262c9ad76b8621" Nov 28 16:29:36 crc kubenswrapper[4954]: E1128 16:29:36.480877 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:9aa8c03633e4b934c57868c1660acf47e7d386ac86bcb344df262c9ad76b8621,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lvhv5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-769dc69bc-62lbb_openstack-operators(ac579e75-f040-4640-ac48-7760cbce72ed): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:37 crc kubenswrapper[4954]: E1128 16:29:37.219448 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168" Nov 28 16:29:37 crc kubenswrapper[4954]: E1128 16:29:37.219659 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4wkqp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-998648c74-jrqz6_openstack-operators(1a866879-c10a-477b-8e98-322e057ea708): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:37 crc kubenswrapper[4954]: E1128 16:29:37.719789 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557" Nov 28 16:29:37 crc kubenswrapper[4954]: E1128 16:29:37.719963 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zg9gk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-5fdfd5b6b5-stknf_openstack-operators(1bf94f9e-ea95-4f86-9cc6-3bc27093b601): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:38 crc kubenswrapper[4954]: E1128 16:29:38.253480 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94" Nov 28 16:29:38 crc kubenswrapper[4954]: E1128 16:29:38.253717 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sgqfg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-7fjtc_openstack-operators(d036c938-f2af-483f-b0a7-2a00101a5e7f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:38 crc kubenswrapper[4954]: E1128 16:29:38.960513 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f" Nov 28 16:29:38 crc kubenswrapper[4954]: E1128 16:29:38.960737 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wqmcj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-8wvs2_openstack-operators(2023394a-92c9-4f1e-8823-f0e33c9381b3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:39 crc kubenswrapper[4954]: E1128 16:29:39.593039 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59" Nov 28 16:29:39 crc kubenswrapper[4954]: E1128 16:29:39.593226 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5r5q2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-b6456fdb6-9vjc5_openstack-operators(ba448a3d-8e7a-4d75-8853-8cf28954b48d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:40 crc kubenswrapper[4954]: E1128 16:29:40.096577 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:1d60701214b39cdb0fa70bbe5710f9b131139a9f4b482c2db4058a04daefb801" Nov 28 16:29:40 crc kubenswrapper[4954]: E1128 16:29:40.096794 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:1d60701214b39cdb0fa70bbe5710f9b131139a9f4b482c2db4058a04daefb801,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-v7n95,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-859b6ccc6-dxqks_openstack-operators(22df58cc-9b57-40fd-a1fd-6dfb287daea1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:40 crc kubenswrapper[4954]: E1128 16:29:40.792836 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:986861e5a0a9954f63581d9d55a30f8057883cefea489415d76257774526eea3" Nov 28 16:29:40 crc kubenswrapper[4954]: E1128 16:29:40.793032 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:986861e5a0a9954f63581d9d55a30f8057883cefea489415d76257774526eea3,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mwphb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-546d4bdf48-nkpj2_openstack-operators(801feb18-7515-4b28-a364-23c2ad1d29a2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:41 crc kubenswrapper[4954]: E1128 16:29:41.267905 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670" Nov 28 16:29:41 crc kubenswrapper[4954]: E1128 16:29:41.268121 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-77w7g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-697bc559fc-mbhfd_openstack-operators(b6a73982-7f81-4947-938d-331487f421e0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:29:45 crc kubenswrapper[4954]: I1128 16:29:45.356362 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:45 crc kubenswrapper[4954]: I1128 16:29:45.356896 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:45 crc kubenswrapper[4954]: I1128 16:29:45.363691 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-metrics-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:45 crc kubenswrapper[4954]: I1128 16:29:45.363717 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/64e36e10-c08f-481d-9fc1-c08d7a54d72d-webhook-certs\") pod \"openstack-operator-controller-manager-78d5d44766-fx2tm\" (UID: \"64e36e10-c08f-481d-9fc1-c08d7a54d72d\") " pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:45 crc kubenswrapper[4954]: I1128 16:29:45.597631 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:45 crc kubenswrapper[4954]: I1128 16:29:45.790257 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6"] Nov 28 16:29:45 crc kubenswrapper[4954]: I1128 16:29:45.854838 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n"] Nov 28 16:29:46 crc kubenswrapper[4954]: W1128 16:29:46.041872 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac397e56_94ea_4618_8516_dba8864212ef.slice/crio-7adb6329b29cb56bb40a2b7dddea2b5d403c2ec6fd8881364400b97b25de4048 WatchSource:0}: Error finding container 7adb6329b29cb56bb40a2b7dddea2b5d403c2ec6fd8881364400b97b25de4048: Status 404 returned error can't find the container with id 7adb6329b29cb56bb40a2b7dddea2b5d403c2ec6fd8881364400b97b25de4048 Nov 28 16:29:46 crc kubenswrapper[4954]: W1128 16:29:46.166327 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb2643938_ed66_4bf8_b080_24b3f8149d15.slice/crio-c1877cc795f129d2e1480b96c36189e61ddfa6259a3f1456aaa0d998b070e16e WatchSource:0}: Error finding container c1877cc795f129d2e1480b96c36189e61ddfa6259a3f1456aaa0d998b070e16e: Status 404 returned error can't find the container with id c1877cc795f129d2e1480b96c36189e61ddfa6259a3f1456aaa0d998b070e16e Nov 28 16:29:46 crc kubenswrapper[4954]: I1128 16:29:46.530908 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" event={"ID":"ac397e56-94ea-4618-8516-dba8864212ef","Type":"ContainerStarted","Data":"7adb6329b29cb56bb40a2b7dddea2b5d403c2ec6fd8881364400b97b25de4048"} Nov 28 16:29:46 crc kubenswrapper[4954]: I1128 16:29:46.532549 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-8jp7l" event={"ID":"65c96ed2-5a4a-4f4e-898d-3f3705d42250","Type":"ContainerStarted","Data":"e596903994773cb6b3011a9a984ee1510a49696a2108be9395e6ed609183b9d4"} Nov 28 16:29:46 crc kubenswrapper[4954]: I1128 16:29:46.536178 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" event={"ID":"b2643938-ed66-4bf8-b080-24b3f8149d15","Type":"ContainerStarted","Data":"c1877cc795f129d2e1480b96c36189e61ddfa6259a3f1456aaa0d998b070e16e"} Nov 28 16:29:47 crc kubenswrapper[4954]: I1128 16:29:47.545146 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-frclp" event={"ID":"2a678943-ab1c-46a3-89d9-03cec5259ecc","Type":"ContainerStarted","Data":"16128ca229e71492263d48f706c20dbaedaa507491632ca127320b86ea394f0d"} Nov 28 16:29:47 crc kubenswrapper[4954]: I1128 16:29:47.547076 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-xl989" event={"ID":"3c9ab6ab-bcb8-4129-bd15-83a08cf28f66","Type":"ContainerStarted","Data":"bac7b7dc41f03e1bded79ef21aca7c903f32a8b003ad472304111deb89590046"} Nov 28 16:29:47 crc kubenswrapper[4954]: I1128 16:29:47.549223 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-jpsg7" 
event={"ID":"8be7ca71-bdf4-42ca-8593-f5ad8ae1404c","Type":"ContainerStarted","Data":"8d3265b85fc98a8236387881baff633ba6ada8b561e7e5660498fe68fe2101d3"} Nov 28 16:29:48 crc kubenswrapper[4954]: I1128 16:29:48.563949 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l" event={"ID":"a2541e1b-97ee-4c01-999e-1f62adba25b6","Type":"ContainerStarted","Data":"0b45c1ee02c31653af29e726eabf117f8bb2bc73c024a4e4c1b2a62266921de2"} Nov 28 16:29:48 crc kubenswrapper[4954]: I1128 16:29:48.570253 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-784hq" event={"ID":"73b9fb56-c718-41db-a043-82defc274674","Type":"ContainerStarted","Data":"afed5283e7317962b6cec794254d2f61f927a2e3e60c811b382478609dc0787d"} Nov 28 16:29:48 crc kubenswrapper[4954]: I1128 16:29:48.573927 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb" event={"ID":"92c27f55-05f6-4359-8ec8-9fa067172b51","Type":"ContainerStarted","Data":"ad5d256d3675c747dcebf18091fe1bf6a0719945c3cc643cc26f973e4f4e1e94"} Nov 28 16:29:48 crc kubenswrapper[4954]: I1128 16:29:48.580097 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8" event={"ID":"9689608c-a2b5-49cd-a758-14d120acd0f4","Type":"ContainerStarted","Data":"f9c222ab6a04b6dcc67f8ed25d1eae7247dd67728648ff9b7db7f44d865ad268"} Nov 28 16:29:48 crc kubenswrapper[4954]: I1128 16:29:48.748739 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-784hq" podStartSLOduration=4.083847106 podStartE2EDuration="35.748716121s" podCreationTimestamp="2025-11-28 16:29:13 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.951174843 +0000 UTC m=+1108.342843384" lastFinishedPulling="2025-11-28 16:29:46.616043848 +0000 UTC m=+1140.007712399" observedRunningTime="2025-11-28 16:29:48.589900789 +0000 UTC m=+1141.981569340" watchObservedRunningTime="2025-11-28 16:29:48.748716121 +0000 UTC m=+1142.140384662" Nov 28 16:29:48 crc kubenswrapper[4954]: I1128 16:29:48.750772 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm"] Nov 28 16:29:48 crc kubenswrapper[4954]: E1128 16:29:48.860519 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-9vjc5" podUID="ba448a3d-8e7a-4d75-8853-8cf28954b48d" Nov 28 16:29:49 crc kubenswrapper[4954]: E1128 16:29:49.023948 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks" podUID="22df58cc-9b57-40fd-a1fd-6dfb287daea1" Nov 28 16:29:49 crc kubenswrapper[4954]: E1128 16:29:49.305822 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-78f8948974-8wvs2" podUID="2023394a-92c9-4f1e-8823-f0e33c9381b3" 
Nov 28 16:29:49 crc kubenswrapper[4954]: E1128 16:29:49.566897 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2" podUID="801feb18-7515-4b28-a364-23c2ad1d29a2" Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.622882 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb" event={"ID":"92c27f55-05f6-4359-8ec8-9fa067172b51","Type":"ContainerStarted","Data":"3c4d57ec464f8bbf1e032fe94348226dc65f25a4e4db9312a76707216fdc242e"} Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.623376 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb" Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.641273 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-8jp7l" event={"ID":"65c96ed2-5a4a-4f4e-898d-3f3705d42250","Type":"ContainerStarted","Data":"a35515f24f43aab0e832223d0a31809e92337b7bab0c6191855f5795daa9d2c7"} Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.641646 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-8jp7l" Nov 28 16:29:49 crc kubenswrapper[4954]: E1128 16:29:49.646860 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-998648c74-jrqz6" podUID="1a866879-c10a-477b-8e98-322e057ea708" Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.655791 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-xl989" event={"ID":"3c9ab6ab-bcb8-4129-bd15-83a08cf28f66","Type":"ContainerStarted","Data":"08b88daf34db67bdfe63ca4b274e4e67f6d1360ccb6e7550d29f2eee908e0480"} Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.656433 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-xl989" Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.661201 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l" event={"ID":"a2541e1b-97ee-4c01-999e-1f62adba25b6","Type":"ContainerStarted","Data":"f2dfd7e95ca34fbba5ad5187887f4ee1a4fbfab28896568553e85624f792ccbf"} Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.661892 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l" Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.679244 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2" event={"ID":"801feb18-7515-4b28-a364-23c2ad1d29a2","Type":"ContainerStarted","Data":"4537b0255e5a14bf717adfc4de9e8b13dcdf20506274e49519078bb0ea403daf"} Nov 28 16:29:49 crc kubenswrapper[4954]: E1128 16:29:49.681782 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:986861e5a0a9954f63581d9d55a30f8057883cefea489415d76257774526eea3\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2" podUID="801feb18-7515-4b28-a364-23c2ad1d29a2" Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.686124 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8" event={"ID":"9689608c-a2b5-49cd-a758-14d120acd0f4","Type":"ContainerStarted","Data":"693a8edadf478193627c55dad5996f69aa1afb261a4f60fc962d290099c5f63a"} Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.686197 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8" Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.696748 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb" podStartSLOduration=7.171315934 podStartE2EDuration="37.696735199s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.817699373 +0000 UTC m=+1108.209367914" lastFinishedPulling="2025-11-28 16:29:45.343118618 +0000 UTC m=+1138.734787179" observedRunningTime="2025-11-28 16:29:49.691877586 +0000 UTC m=+1143.083546127" watchObservedRunningTime="2025-11-28 16:29:49.696735199 +0000 UTC m=+1143.088403730" Nov 28 16:29:49 crc kubenswrapper[4954]: E1128 16:29:49.711911 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-svnnf" podUID="1e466f0e-3bcc-4812-9939-90e852e54c6e" Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.715762 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-9vjc5" event={"ID":"ba448a3d-8e7a-4d75-8853-8cf28954b48d","Type":"ContainerStarted","Data":"1ab703c8e52e437d6ab491848c36ccc04e8e1d8648d1cbb27ee08b2422557edd"} Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.729088 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" event={"ID":"64e36e10-c08f-481d-9fc1-c08d7a54d72d","Type":"ContainerStarted","Data":"92825c027bd472577279edc7f0b056e5a19963e63c8e10ab6ed6a16dda962e47"} Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.729122 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" event={"ID":"64e36e10-c08f-481d-9fc1-c08d7a54d72d","Type":"ContainerStarted","Data":"9d283e3bdb9045a81a454d5f406440564f54b3ae9dae188da8a7f6d33d05a8cc"} Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.729689 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.740831 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks" event={"ID":"22df58cc-9b57-40fd-a1fd-6dfb287daea1","Type":"ContainerStarted","Data":"354c7c02987d02302c14bf9b20cf3504712e9435a05fe2d8ccf825a3998b2fc5"} Nov 28 16:29:49 crc kubenswrapper[4954]: E1128 
16:29:49.749761 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:1d60701214b39cdb0fa70bbe5710f9b131139a9f4b482c2db4058a04daefb801\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks" podUID="22df58cc-9b57-40fd-a1fd-6dfb287daea1" Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.752809 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l" podStartSLOduration=7.376043985 podStartE2EDuration="37.752792174s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.96642158 +0000 UTC m=+1108.358090131" lastFinishedPulling="2025-11-28 16:29:45.343169769 +0000 UTC m=+1138.734838320" observedRunningTime="2025-11-28 16:29:49.749515961 +0000 UTC m=+1143.141184502" watchObservedRunningTime="2025-11-28 16:29:49.752792174 +0000 UTC m=+1143.144460715" Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.753027 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-8wvs2" event={"ID":"2023394a-92c9-4f1e-8823-f0e33c9381b3","Type":"ContainerStarted","Data":"8ccdae0c4d79b414a3674bfeb516f766c6737dc429b12c5d571c734381688b73"} Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.798878 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8" podStartSLOduration=6.007653564 podStartE2EDuration="37.798859046s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.825936621 +0000 UTC m=+1108.217605162" lastFinishedPulling="2025-11-28 16:29:46.617142103 +0000 UTC m=+1140.008810644" observedRunningTime="2025-11-28 16:29:49.790741232 +0000 UTC m=+1143.182409773" watchObservedRunningTime="2025-11-28 16:29:49.798859046 +0000 UTC m=+1143.190527587" Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.836631 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-xl989" podStartSLOduration=3.152047894 podStartE2EDuration="37.836614219s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.174251924 +0000 UTC m=+1107.565920465" lastFinishedPulling="2025-11-28 16:29:48.858818249 +0000 UTC m=+1142.250486790" observedRunningTime="2025-11-28 16:29:49.832103407 +0000 UTC m=+1143.223771948" watchObservedRunningTime="2025-11-28 16:29:49.836614219 +0000 UTC m=+1143.228282760" Nov 28 16:29:49 crc kubenswrapper[4954]: I1128 16:29:49.908587 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-8jp7l" podStartSLOduration=3.716974884 podStartE2EDuration="37.908571412s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.283904008 +0000 UTC m=+1107.675572549" lastFinishedPulling="2025-11-28 16:29:48.475500536 +0000 UTC m=+1141.867169077" observedRunningTime="2025-11-28 16:29:49.907857909 +0000 UTC m=+1143.299526440" watchObservedRunningTime="2025-11-28 16:29:49.908571412 +0000 UTC m=+1143.300239953" Nov 28 16:29:49 crc kubenswrapper[4954]: E1128 16:29:49.918075 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-kvcbm" podUID="1aff6577-6109-4234-8300-62afe871125b" Nov 28 16:29:49 crc kubenswrapper[4954]: E1128 16:29:49.955033 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-vd2qv" podUID="273ea62c-4545-4d8e-b831-ad40b8413f6d" Nov 28 16:29:50 crc kubenswrapper[4954]: E1128 16:29:50.004719 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-62lbb" podUID="ac579e75-f040-4640-ac48-7760cbce72ed" Nov 28 16:29:50 crc kubenswrapper[4954]: I1128 16:29:50.018273 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" podStartSLOduration=37.018256477 podStartE2EDuration="37.018256477s" podCreationTimestamp="2025-11-28 16:29:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:29:50.011295858 +0000 UTC m=+1143.402964399" watchObservedRunningTime="2025-11-28 16:29:50.018256477 +0000 UTC m=+1143.409925018" Nov 28 16:29:50 crc kubenswrapper[4954]: E1128 16:29:50.055606 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7fjtc" podUID="d036c938-f2af-483f-b0a7-2a00101a5e7f" Nov 28 16:29:50 crc kubenswrapper[4954]: E1128 16:29:50.066257 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-stknf" podUID="1bf94f9e-ea95-4f86-9cc6-3bc27093b601" Nov 28 16:29:50 crc kubenswrapper[4954]: I1128 16:29:50.760901 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7fjtc" event={"ID":"d036c938-f2af-483f-b0a7-2a00101a5e7f","Type":"ContainerStarted","Data":"cb5432db435ffdfe8f9ff1fdd4a33bf8fff9e172d56b642c6b62cad82003dc7a"} Nov 28 16:29:50 crc kubenswrapper[4954]: I1128 16:29:50.764399 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-jpsg7" event={"ID":"8be7ca71-bdf4-42ca-8593-f5ad8ae1404c","Type":"ContainerStarted","Data":"d0a543ca1d680b9adaf77a0926ef25032f129dcaf76746cdf236d2167f2b6891"} Nov 28 16:29:50 crc kubenswrapper[4954]: I1128 16:29:50.764563 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-jpsg7" Nov 28 16:29:50 crc kubenswrapper[4954]: I1128 16:29:50.766255 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-stknf" 
event={"ID":"1bf94f9e-ea95-4f86-9cc6-3bc27093b601","Type":"ContainerStarted","Data":"b88b4e580c29d22d68862056e523815e589533aeb51b9523dfc1699c0b041531"} Nov 28 16:29:50 crc kubenswrapper[4954]: I1128 16:29:50.767451 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-62lbb" event={"ID":"ac579e75-f040-4640-ac48-7760cbce72ed","Type":"ContainerStarted","Data":"72e81b441edd41f404ce61520ee211ed7e94a028904bf854c18596246d404451"} Nov 28 16:29:50 crc kubenswrapper[4954]: I1128 16:29:50.773107 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-frclp" event={"ID":"2a678943-ab1c-46a3-89d9-03cec5259ecc","Type":"ContainerStarted","Data":"f4bb470ff443e34ef4409f4ae3d81b5aba6cf9a436cec45a4812b77f2775bafa"} Nov 28 16:29:50 crc kubenswrapper[4954]: I1128 16:29:50.773780 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-frclp" Nov 28 16:29:50 crc kubenswrapper[4954]: I1128 16:29:50.775168 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-svnnf" event={"ID":"1e466f0e-3bcc-4812-9939-90e852e54c6e","Type":"ContainerStarted","Data":"03aed13c50c8ed10d42cba2db4619d872a03c9242b4f40601a18c3cf7511e11f"} Nov 28 16:29:50 crc kubenswrapper[4954]: I1128 16:29:50.789724 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-jrqz6" event={"ID":"1a866879-c10a-477b-8e98-322e057ea708","Type":"ContainerStarted","Data":"fd56f7e1d712685edb55830b46442e33c52c6d197346e7c80839afa09ae7b38d"} Nov 28 16:29:50 crc kubenswrapper[4954]: I1128 16:29:50.795951 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-kvcbm" event={"ID":"1aff6577-6109-4234-8300-62afe871125b","Type":"ContainerStarted","Data":"faa937037e7d5ab35dfc2defa315a8cdedf610c7c2a1f7216fe22ae2f0b7a8f5"} Nov 28 16:29:50 crc kubenswrapper[4954]: I1128 16:29:50.805664 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-vd2qv" event={"ID":"273ea62c-4545-4d8e-b831-ad40b8413f6d","Type":"ContainerStarted","Data":"69487d23c29dfc994a9f444e191f2cad1de619369c7fa542ed83e989c18c2ddc"} Nov 28 16:29:50 crc kubenswrapper[4954]: I1128 16:29:50.881893 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-jpsg7" podStartSLOduration=3.447471474 podStartE2EDuration="38.88187201s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:13.616728995 +0000 UTC m=+1107.008397536" lastFinishedPulling="2025-11-28 16:29:49.051129531 +0000 UTC m=+1142.442798072" observedRunningTime="2025-11-28 16:29:50.85885754 +0000 UTC m=+1144.250526101" watchObservedRunningTime="2025-11-28 16:29:50.88187201 +0000 UTC m=+1144.273540551" Nov 28 16:29:50 crc kubenswrapper[4954]: I1128 16:29:50.904906 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-frclp" podStartSLOduration=4.027995223 podStartE2EDuration="38.904889691s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.405154275 +0000 UTC m=+1107.796822816" 
lastFinishedPulling="2025-11-28 16:29:49.282048743 +0000 UTC m=+1142.673717284" observedRunningTime="2025-11-28 16:29:50.896096516 +0000 UTC m=+1144.287765067" watchObservedRunningTime="2025-11-28 16:29:50.904889691 +0000 UTC m=+1144.296558232" Nov 28 16:29:51 crc kubenswrapper[4954]: I1128 16:29:51.822336 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-668d9c48b9-jpsg7" Nov 28 16:29:51 crc kubenswrapper[4954]: I1128 16:29:51.822608 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-frclp" Nov 28 16:29:51 crc kubenswrapper[4954]: I1128 16:29:51.823143 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-8jp7l" Nov 28 16:29:51 crc kubenswrapper[4954]: I1128 16:29:51.823385 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-xl989" Nov 28 16:29:53 crc kubenswrapper[4954]: I1128 16:29:53.103177 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-9smv8" Nov 28 16:29:53 crc kubenswrapper[4954]: I1128 16:29:53.677175 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-vgcrb" Nov 28 16:29:53 crc kubenswrapper[4954]: I1128 16:29:53.809030 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wcz2l" Nov 28 16:29:55 crc kubenswrapper[4954]: I1128 16:29:55.607285 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-78d5d44766-fx2tm" Nov 28 16:30:00 crc kubenswrapper[4954]: I1128 16:30:00.141285 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq"] Nov 28 16:30:00 crc kubenswrapper[4954]: I1128 16:30:00.142875 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" Nov 28 16:30:00 crc kubenswrapper[4954]: I1128 16:30:00.145238 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 16:30:00 crc kubenswrapper[4954]: I1128 16:30:00.150773 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 16:30:00 crc kubenswrapper[4954]: I1128 16:30:00.157219 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq"] Nov 28 16:30:00 crc kubenswrapper[4954]: I1128 16:30:00.335783 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e80c711-3bc5-4e23-baf4-58a5e12a287d-config-volume\") pod \"collect-profiles-29405790-hrdjq\" (UID: \"4e80c711-3bc5-4e23-baf4-58a5e12a287d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" Nov 28 16:30:00 crc kubenswrapper[4954]: I1128 16:30:00.335914 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlblg\" (UniqueName: \"kubernetes.io/projected/4e80c711-3bc5-4e23-baf4-58a5e12a287d-kube-api-access-nlblg\") pod \"collect-profiles-29405790-hrdjq\" (UID: \"4e80c711-3bc5-4e23-baf4-58a5e12a287d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" Nov 28 16:30:00 crc kubenswrapper[4954]: I1128 16:30:00.335955 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e80c711-3bc5-4e23-baf4-58a5e12a287d-secret-volume\") pod \"collect-profiles-29405790-hrdjq\" (UID: \"4e80c711-3bc5-4e23-baf4-58a5e12a287d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" Nov 28 16:30:00 crc kubenswrapper[4954]: I1128 16:30:00.437826 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e80c711-3bc5-4e23-baf4-58a5e12a287d-config-volume\") pod \"collect-profiles-29405790-hrdjq\" (UID: \"4e80c711-3bc5-4e23-baf4-58a5e12a287d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" Nov 28 16:30:00 crc kubenswrapper[4954]: I1128 16:30:00.437921 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlblg\" (UniqueName: \"kubernetes.io/projected/4e80c711-3bc5-4e23-baf4-58a5e12a287d-kube-api-access-nlblg\") pod \"collect-profiles-29405790-hrdjq\" (UID: \"4e80c711-3bc5-4e23-baf4-58a5e12a287d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" Nov 28 16:30:00 crc kubenswrapper[4954]: I1128 16:30:00.437955 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e80c711-3bc5-4e23-baf4-58a5e12a287d-secret-volume\") pod \"collect-profiles-29405790-hrdjq\" (UID: \"4e80c711-3bc5-4e23-baf4-58a5e12a287d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" Nov 28 16:30:00 crc kubenswrapper[4954]: I1128 16:30:00.439024 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e80c711-3bc5-4e23-baf4-58a5e12a287d-config-volume\") pod 
\"collect-profiles-29405790-hrdjq\" (UID: \"4e80c711-3bc5-4e23-baf4-58a5e12a287d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" Nov 28 16:30:00 crc kubenswrapper[4954]: I1128 16:30:00.444642 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e80c711-3bc5-4e23-baf4-58a5e12a287d-secret-volume\") pod \"collect-profiles-29405790-hrdjq\" (UID: \"4e80c711-3bc5-4e23-baf4-58a5e12a287d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" Nov 28 16:30:00 crc kubenswrapper[4954]: I1128 16:30:00.459838 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlblg\" (UniqueName: \"kubernetes.io/projected/4e80c711-3bc5-4e23-baf4-58a5e12a287d-kube-api-access-nlblg\") pod \"collect-profiles-29405790-hrdjq\" (UID: \"4e80c711-3bc5-4e23-baf4-58a5e12a287d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" Nov 28 16:30:00 crc kubenswrapper[4954]: I1128 16:30:00.471549 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" Nov 28 16:30:01 crc kubenswrapper[4954]: I1128 16:30:01.748856 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq"] Nov 28 16:30:01 crc kubenswrapper[4954]: W1128 16:30:01.767275 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e80c711_3bc5_4e23_baf4_58a5e12a287d.slice/crio-06406cbb6abbaf1744e45115b85647adcbeaee9e5701dfb051e4ef46dcf444d0 WatchSource:0}: Error finding container 06406cbb6abbaf1744e45115b85647adcbeaee9e5701dfb051e4ef46dcf444d0: Status 404 returned error can't find the container with id 06406cbb6abbaf1744e45115b85647adcbeaee9e5701dfb051e4ef46dcf444d0 Nov 28 16:30:01 crc kubenswrapper[4954]: I1128 16:30:01.914133 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" event={"ID":"4e80c711-3bc5-4e23-baf4-58a5e12a287d","Type":"ContainerStarted","Data":"06406cbb6abbaf1744e45115b85647adcbeaee9e5701dfb051e4ef46dcf444d0"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.480947 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.481025 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.937082 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-9vjc5" event={"ID":"ba448a3d-8e7a-4d75-8853-8cf28954b48d","Type":"ContainerStarted","Data":"6b6f9809953c31ebb5ed76d6f305688159f05490a255b8777065627c5ff9954f"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.937654 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-9vjc5" Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.950040 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" event={"ID":"b2643938-ed66-4bf8-b080-24b3f8149d15","Type":"ContainerStarted","Data":"f3a38a45efac02265a692df66b4e82a3472851604a6ea6a224c51339ec240870"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.952081 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-stknf" event={"ID":"1bf94f9e-ea95-4f86-9cc6-3bc27093b601","Type":"ContainerStarted","Data":"704ef7cc0341bdfde7ba932e827f2bd3c25414dbf47614b69eea200d4a542ff2"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.953758 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7fjtc" event={"ID":"d036c938-f2af-483f-b0a7-2a00101a5e7f","Type":"ContainerStarted","Data":"ff5a48c09e5b9ec176e27d6a17ad05b3e22fda16c1e7093abb7bbbd68f2afb88"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.955453 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-8wvs2" event={"ID":"2023394a-92c9-4f1e-8823-f0e33c9381b3","Type":"ContainerStarted","Data":"249b2f46e83845baec55d76272f6bc67ca2afe33fe21a93e15ba94871707bfe7"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.955581 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-78f8948974-8wvs2" Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.957547 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-jrqz6" event={"ID":"1a866879-c10a-477b-8e98-322e057ea708","Type":"ContainerStarted","Data":"08a2cec0a2dbf31dce1b1d5d4cf2ed2279d08db048fa1bdd3a31b49cc397ab8a"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.959868 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" event={"ID":"4e80c711-3bc5-4e23-baf4-58a5e12a287d","Type":"ContainerStarted","Data":"7c9a2fe6844965d9f856d01f84e3192c4b05d855025c2ba5cf7660c022bbbf78"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.961262 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" event={"ID":"ac397e56-94ea-4618-8516-dba8864212ef","Type":"ContainerStarted","Data":"7ce459307a81f603b8a7b312483a655b50529773e63db6e57dbf47744306b1be"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.966203 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-svnnf" event={"ID":"1e466f0e-3bcc-4812-9939-90e852e54c6e","Type":"ContainerStarted","Data":"e36e50577016c7759562035077f71ca089f959d49c5ca27176e3222a948a267e"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.969755 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-kvcbm" event={"ID":"1aff6577-6109-4234-8300-62afe871125b","Type":"ContainerStarted","Data":"fe55ab30fb9e6aa2423d83d88556678f6b1d44db25d6d4ce14725c2b60e66096"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.971808 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-vd2qv" event={"ID":"273ea62c-4545-4d8e-b831-ad40b8413f6d","Type":"ContainerStarted","Data":"151cc02d2f2e275bda70d287656df72c7a825cec22f88979442bb8e1db836899"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.971970 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-vd2qv" Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.974554 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-mbhfd" event={"ID":"b6a73982-7f81-4947-938d-331487f421e0","Type":"ContainerStarted","Data":"ff569ae540368b8f7feefdb2e8d47073e606329c87ec2e54b499586f3bbe1fde"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.976264 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2" event={"ID":"801feb18-7515-4b28-a364-23c2ad1d29a2","Type":"ContainerStarted","Data":"a9434c13e8fbe0df087d40b16302ef38dbe62693cd1141426709eb48d39d04a1"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.978398 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-62lbb" event={"ID":"ac579e75-f040-4640-ac48-7760cbce72ed","Type":"ContainerStarted","Data":"3111ca27bc459f4137abb52670636ab50c0554f8f5b209d401d22ce94b056c81"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.980328 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks" event={"ID":"22df58cc-9b57-40fd-a1fd-6dfb287daea1","Type":"ContainerStarted","Data":"1fb473a11bf102df22561df61727d12275e2b87c0d9c6d965e26ca44f9cb3807"} Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.980466 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks" Nov 28 16:30:02 crc kubenswrapper[4954]: I1128 16:30:02.986003 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-9vjc5" podStartSLOduration=12.260164739 podStartE2EDuration="50.985985824s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.707544794 +0000 UTC m=+1108.099213335" lastFinishedPulling="2025-11-28 16:29:53.433365879 +0000 UTC m=+1146.825034420" observedRunningTime="2025-11-28 16:30:02.978921722 +0000 UTC m=+1156.370590283" watchObservedRunningTime="2025-11-28 16:30:02.985985824 +0000 UTC m=+1156.377654355" Nov 28 16:30:03 crc kubenswrapper[4954]: I1128 16:30:03.026626 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-vd2qv" podStartSLOduration=3.535702206 podStartE2EDuration="51.026607975s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:13.798123396 +0000 UTC m=+1107.189791937" lastFinishedPulling="2025-11-28 16:30:01.289029165 +0000 UTC m=+1154.680697706" observedRunningTime="2025-11-28 16:30:03.019939847 +0000 UTC m=+1156.411608378" watchObservedRunningTime="2025-11-28 16:30:03.026607975 +0000 UTC m=+1156.418276516" Nov 28 16:30:03 crc kubenswrapper[4954]: I1128 16:30:03.055300 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/placement-operator-controller-manager-78f8948974-8wvs2" podStartSLOduration=12.385886665 podStartE2EDuration="51.055281744s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.817593519 +0000 UTC m=+1108.209262060" lastFinishedPulling="2025-11-28 16:29:53.486988588 +0000 UTC m=+1146.878657139" observedRunningTime="2025-11-28 16:30:03.051781304 +0000 UTC m=+1156.443449855" watchObservedRunningTime="2025-11-28 16:30:03.055281744 +0000 UTC m=+1156.446950285" Nov 28 16:30:03 crc kubenswrapper[4954]: I1128 16:30:03.084094 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" podStartSLOduration=3.084063405 podStartE2EDuration="3.084063405s" podCreationTimestamp="2025-11-28 16:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:30:03.07370523 +0000 UTC m=+1156.465373771" watchObservedRunningTime="2025-11-28 16:30:03.084063405 +0000 UTC m=+1156.475731946" Nov 28 16:30:03 crc kubenswrapper[4954]: E1128 16:30:03.319805 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-mbhfd" podUID="b6a73982-7f81-4947-938d-331487f421e0" Nov 28 16:30:03 crc kubenswrapper[4954]: I1128 16:30:03.414780 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks" podStartSLOduration=4.317935272 podStartE2EDuration="51.414759931s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.137631037 +0000 UTC m=+1107.529299578" lastFinishedPulling="2025-11-28 16:30:01.234455676 +0000 UTC m=+1154.626124237" observedRunningTime="2025-11-28 16:30:03.407909636 +0000 UTC m=+1156.799578187" watchObservedRunningTime="2025-11-28 16:30:03.414759931 +0000 UTC m=+1156.806428472" Nov 28 16:30:04 crc kubenswrapper[4954]: I1128 16:30:04.008342 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" event={"ID":"b2643938-ed66-4bf8-b080-24b3f8149d15","Type":"ContainerStarted","Data":"f78d96305333d3d9d2c99cde369d13812c3d1f1371b7069f32b991a6cd839530"} Nov 28 16:30:04 crc kubenswrapper[4954]: I1128 16:30:04.008712 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" Nov 28 16:30:04 crc kubenswrapper[4954]: I1128 16:30:04.014421 4954 generic.go:334] "Generic (PLEG): container finished" podID="4e80c711-3bc5-4e23-baf4-58a5e12a287d" containerID="7c9a2fe6844965d9f856d01f84e3192c4b05d855025c2ba5cf7660c022bbbf78" exitCode=0 Nov 28 16:30:04 crc kubenswrapper[4954]: I1128 16:30:04.014490 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" event={"ID":"4e80c711-3bc5-4e23-baf4-58a5e12a287d","Type":"ContainerDied","Data":"7c9a2fe6844965d9f856d01f84e3192c4b05d855025c2ba5cf7660c022bbbf78"} Nov 28 16:30:04 crc kubenswrapper[4954]: I1128 16:30:04.040319 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" podStartSLOduration=41.236691939 podStartE2EDuration="52.040302859s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:46.168731931 +0000 UTC m=+1139.560400472" lastFinishedPulling="2025-11-28 16:29:56.972342851 +0000 UTC m=+1150.364011392" observedRunningTime="2025-11-28 16:30:04.037501231 +0000 UTC m=+1157.429169782" watchObservedRunningTime="2025-11-28 16:30:04.040302859 +0000 UTC m=+1157.431971420" Nov 28 16:30:04 crc kubenswrapper[4954]: I1128 16:30:04.062394 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-kvcbm" podStartSLOduration=4.834061524 podStartE2EDuration="52.06237768s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.060094839 +0000 UTC m=+1107.451763380" lastFinishedPulling="2025-11-28 16:30:01.288410995 +0000 UTC m=+1154.680079536" observedRunningTime="2025-11-28 16:30:04.058658474 +0000 UTC m=+1157.450327015" watchObservedRunningTime="2025-11-28 16:30:04.06237768 +0000 UTC m=+1157.454046221" Nov 28 16:30:04 crc kubenswrapper[4954]: I1128 16:30:04.125371 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-998648c74-jrqz6" podStartSLOduration=5.534075464 podStartE2EDuration="52.125352302s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.574627251 +0000 UTC m=+1107.966295792" lastFinishedPulling="2025-11-28 16:30:01.165904089 +0000 UTC m=+1154.557572630" observedRunningTime="2025-11-28 16:30:04.124328249 +0000 UTC m=+1157.515996800" watchObservedRunningTime="2025-11-28 16:30:04.125352302 +0000 UTC m=+1157.517020853" Nov 28 16:30:04 crc kubenswrapper[4954]: I1128 16:30:04.152370 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2" podStartSLOduration=5.195665837 podStartE2EDuration="52.152350167s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.277764825 +0000 UTC m=+1107.669433366" lastFinishedPulling="2025-11-28 16:30:01.234449155 +0000 UTC m=+1154.626117696" observedRunningTime="2025-11-28 16:30:04.144832542 +0000 UTC m=+1157.536501093" watchObservedRunningTime="2025-11-28 16:30:04.152350167 +0000 UTC m=+1157.544018708" Nov 28 16:30:04 crc kubenswrapper[4954]: I1128 16:30:04.165762 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-svnnf" podStartSLOduration=5.59557401 podStartE2EDuration="52.165747246s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.721613955 +0000 UTC m=+1108.113282496" lastFinishedPulling="2025-11-28 16:30:01.291787191 +0000 UTC m=+1154.683455732" observedRunningTime="2025-11-28 16:30:04.158750548 +0000 UTC m=+1157.550419089" watchObservedRunningTime="2025-11-28 16:30:04.165747246 +0000 UTC m=+1157.557415787" Nov 28 16:30:04 crc kubenswrapper[4954]: I1128 16:30:04.185442 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-stknf" podStartSLOduration=5.581181728 podStartE2EDuration="52.185427712s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 
16:29:14.684216703 +0000 UTC m=+1108.075885244" lastFinishedPulling="2025-11-28 16:30:01.288462687 +0000 UTC m=+1154.680131228" observedRunningTime="2025-11-28 16:30:04.184921997 +0000 UTC m=+1157.576590538" watchObservedRunningTime="2025-11-28 16:30:04.185427712 +0000 UTC m=+1157.577096253" Nov 28 16:30:04 crc kubenswrapper[4954]: I1128 16:30:04.240310 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-62lbb" podStartSLOduration=5.903935816 podStartE2EDuration="52.240292261s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.949031386 +0000 UTC m=+1108.340699927" lastFinishedPulling="2025-11-28 16:30:01.285387801 +0000 UTC m=+1154.677056372" observedRunningTime="2025-11-28 16:30:04.227380116 +0000 UTC m=+1157.619048657" watchObservedRunningTime="2025-11-28 16:30:04.240292261 +0000 UTC m=+1157.631960792" Nov 28 16:30:04 crc kubenswrapper[4954]: I1128 16:30:04.243230 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7fjtc" podStartSLOduration=5.756871691 podStartE2EDuration="52.243222543s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.8153817 +0000 UTC m=+1108.207050241" lastFinishedPulling="2025-11-28 16:30:01.301732552 +0000 UTC m=+1154.693401093" observedRunningTime="2025-11-28 16:30:04.23962363 +0000 UTC m=+1157.631292191" watchObservedRunningTime="2025-11-28 16:30:04.243222543 +0000 UTC m=+1157.634891084" Nov 28 16:30:05 crc kubenswrapper[4954]: I1128 16:30:05.417835 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" Nov 28 16:30:05 crc kubenswrapper[4954]: I1128 16:30:05.543801 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e80c711-3bc5-4e23-baf4-58a5e12a287d-secret-volume\") pod \"4e80c711-3bc5-4e23-baf4-58a5e12a287d\" (UID: \"4e80c711-3bc5-4e23-baf4-58a5e12a287d\") " Nov 28 16:30:05 crc kubenswrapper[4954]: I1128 16:30:05.543997 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e80c711-3bc5-4e23-baf4-58a5e12a287d-config-volume\") pod \"4e80c711-3bc5-4e23-baf4-58a5e12a287d\" (UID: \"4e80c711-3bc5-4e23-baf4-58a5e12a287d\") " Nov 28 16:30:05 crc kubenswrapper[4954]: I1128 16:30:05.544023 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlblg\" (UniqueName: \"kubernetes.io/projected/4e80c711-3bc5-4e23-baf4-58a5e12a287d-kube-api-access-nlblg\") pod \"4e80c711-3bc5-4e23-baf4-58a5e12a287d\" (UID: \"4e80c711-3bc5-4e23-baf4-58a5e12a287d\") " Nov 28 16:30:05 crc kubenswrapper[4954]: I1128 16:30:05.544739 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e80c711-3bc5-4e23-baf4-58a5e12a287d-config-volume" (OuterVolumeSpecName: "config-volume") pod "4e80c711-3bc5-4e23-baf4-58a5e12a287d" (UID: "4e80c711-3bc5-4e23-baf4-58a5e12a287d"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:05 crc kubenswrapper[4954]: I1128 16:30:05.549763 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e80c711-3bc5-4e23-baf4-58a5e12a287d-kube-api-access-nlblg" (OuterVolumeSpecName: "kube-api-access-nlblg") pod "4e80c711-3bc5-4e23-baf4-58a5e12a287d" (UID: "4e80c711-3bc5-4e23-baf4-58a5e12a287d"). InnerVolumeSpecName "kube-api-access-nlblg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:05 crc kubenswrapper[4954]: I1128 16:30:05.556876 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e80c711-3bc5-4e23-baf4-58a5e12a287d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4e80c711-3bc5-4e23-baf4-58a5e12a287d" (UID: "4e80c711-3bc5-4e23-baf4-58a5e12a287d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:30:05 crc kubenswrapper[4954]: I1128 16:30:05.645821 4954 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e80c711-3bc5-4e23-baf4-58a5e12a287d-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:05 crc kubenswrapper[4954]: I1128 16:30:05.645853 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlblg\" (UniqueName: \"kubernetes.io/projected/4e80c711-3bc5-4e23-baf4-58a5e12a287d-kube-api-access-nlblg\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:05 crc kubenswrapper[4954]: I1128 16:30:05.645867 4954 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e80c711-3bc5-4e23-baf4-58a5e12a287d-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:06 crc kubenswrapper[4954]: I1128 16:30:06.029848 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" event={"ID":"ac397e56-94ea-4618-8516-dba8864212ef","Type":"ContainerStarted","Data":"d4bbfc89bef5c8ea62fceb91ec3782f97d0b05049a8ef36f56299a77f9052b15"} Nov 28 16:30:06 crc kubenswrapper[4954]: I1128 16:30:06.030491 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" Nov 28 16:30:06 crc kubenswrapper[4954]: I1128 16:30:06.031791 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" event={"ID":"4e80c711-3bc5-4e23-baf4-58a5e12a287d","Type":"ContainerDied","Data":"06406cbb6abbaf1744e45115b85647adcbeaee9e5701dfb051e4ef46dcf444d0"} Nov 28 16:30:06 crc kubenswrapper[4954]: I1128 16:30:06.031875 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="06406cbb6abbaf1744e45115b85647adcbeaee9e5701dfb051e4ef46dcf444d0" Nov 28 16:30:06 crc kubenswrapper[4954]: I1128 16:30:06.031894 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq" Nov 28 16:30:06 crc kubenswrapper[4954]: I1128 16:30:06.049506 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" podStartSLOduration=39.037232185 podStartE2EDuration="54.049490536s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:46.154419403 +0000 UTC m=+1139.546087944" lastFinishedPulling="2025-11-28 16:30:01.166677744 +0000 UTC m=+1154.558346295" observedRunningTime="2025-11-28 16:30:06.04898759 +0000 UTC m=+1159.440656141" watchObservedRunningTime="2025-11-28 16:30:06.049490536 +0000 UTC m=+1159.441159077" Nov 28 16:30:07 crc kubenswrapper[4954]: I1128 16:30:07.046796 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-c6pr6" Nov 28 16:30:09 crc kubenswrapper[4954]: I1128 16:30:09.056407 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-mbhfd" event={"ID":"b6a73982-7f81-4947-938d-331487f421e0","Type":"ContainerStarted","Data":"d3ba510843286c9884a369890fdf9814598ecf1972c6d19fe3ba6e872de308bd"} Nov 28 16:30:09 crc kubenswrapper[4954]: I1128 16:30:09.057025 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-mbhfd" Nov 28 16:30:09 crc kubenswrapper[4954]: I1128 16:30:09.080106 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-mbhfd" podStartSLOduration=3.835115232 podStartE2EDuration="57.080067726s" podCreationTimestamp="2025-11-28 16:29:12 +0000 UTC" firstStartedPulling="2025-11-28 16:29:14.585397008 +0000 UTC m=+1107.977065549" lastFinishedPulling="2025-11-28 16:30:07.830349502 +0000 UTC m=+1161.222018043" observedRunningTime="2025-11-28 16:30:09.075276726 +0000 UTC m=+1162.466945267" watchObservedRunningTime="2025-11-28 16:30:09.080067726 +0000 UTC m=+1162.471736267" Nov 28 16:30:09 crc kubenswrapper[4954]: I1128 16:30:09.104966 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd4r279n" Nov 28 16:30:12 crc kubenswrapper[4954]: I1128 16:30:12.686920 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-vd2qv" Nov 28 16:30:12 crc kubenswrapper[4954]: I1128 16:30:12.700456 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-dxqks" Nov 28 16:30:12 crc kubenswrapper[4954]: I1128 16:30:12.754159 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-kvcbm" Nov 28 16:30:12 crc kubenswrapper[4954]: I1128 16:30:12.757048 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-kvcbm" Nov 28 16:30:12 crc kubenswrapper[4954]: I1128 16:30:12.959215 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2" Nov 28 16:30:12 crc kubenswrapper[4954]: I1128 16:30:12.962772 4954 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-nkpj2" Nov 28 16:30:13 crc kubenswrapper[4954]: I1128 16:30:13.105000 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-svnnf" Nov 28 16:30:13 crc kubenswrapper[4954]: I1128 16:30:13.108345 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-svnnf" Nov 28 16:30:13 crc kubenswrapper[4954]: I1128 16:30:13.159300 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-mbhfd" Nov 28 16:30:13 crc kubenswrapper[4954]: I1128 16:30:13.193426 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-998648c74-jrqz6" Nov 28 16:30:13 crc kubenswrapper[4954]: I1128 16:30:13.195888 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-998648c74-jrqz6" Nov 28 16:30:13 crc kubenswrapper[4954]: I1128 16:30:13.212404 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-9vjc5" Nov 28 16:30:13 crc kubenswrapper[4954]: I1128 16:30:13.393855 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-stknf" Nov 28 16:30:13 crc kubenswrapper[4954]: I1128 16:30:13.395816 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-stknf" Nov 28 16:30:13 crc kubenswrapper[4954]: I1128 16:30:13.526319 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-78f8948974-8wvs2" Nov 28 16:30:13 crc kubenswrapper[4954]: I1128 16:30:13.714088 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7fjtc" Nov 28 16:30:13 crc kubenswrapper[4954]: I1128 16:30:13.716779 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5854674fcc-7fjtc" Nov 28 16:30:13 crc kubenswrapper[4954]: I1128 16:30:13.784595 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-62lbb" Nov 28 16:30:13 crc kubenswrapper[4954]: I1128 16:30:13.786255 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-62lbb" Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.805743 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lgkkh"] Nov 28 16:30:31 crc kubenswrapper[4954]: E1128 16:30:31.806875 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e80c711-3bc5-4e23-baf4-58a5e12a287d" containerName="collect-profiles" Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.806901 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e80c711-3bc5-4e23-baf4-58a5e12a287d" containerName="collect-profiles" Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.807170 4954 
memory_manager.go:354] "RemoveStaleState removing state" podUID="4e80c711-3bc5-4e23-baf4-58a5e12a287d" containerName="collect-profiles" Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.808264 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-lgkkh" Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.810559 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.810787 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.810944 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-q76b7" Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.811297 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.814602 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lgkkh"] Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.880849 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-dwgcm"] Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.882108 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.883665 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.915025 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-dwgcm"] Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.993588 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hxjm\" (UniqueName: \"kubernetes.io/projected/1653c411-d0eb-434b-9808-59c7aeee21af-kube-api-access-5hxjm\") pod \"dnsmasq-dns-78dd6ddcc-dwgcm\" (UID: \"1653c411-d0eb-434b-9808-59c7aeee21af\") " pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.994029 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9nzr\" (UniqueName: \"kubernetes.io/projected/c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83-kube-api-access-p9nzr\") pod \"dnsmasq-dns-675f4bcbfc-lgkkh\" (UID: \"c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lgkkh" Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.994152 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1653c411-d0eb-434b-9808-59c7aeee21af-config\") pod \"dnsmasq-dns-78dd6ddcc-dwgcm\" (UID: \"1653c411-d0eb-434b-9808-59c7aeee21af\") " pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.994367 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83-config\") pod \"dnsmasq-dns-675f4bcbfc-lgkkh\" (UID: \"c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lgkkh" Nov 28 16:30:31 crc kubenswrapper[4954]: I1128 16:30:31.994453 4954 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1653c411-d0eb-434b-9808-59c7aeee21af-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-dwgcm\" (UID: \"1653c411-d0eb-434b-9808-59c7aeee21af\") " pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.096122 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83-config\") pod \"dnsmasq-dns-675f4bcbfc-lgkkh\" (UID: \"c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lgkkh" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.096174 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1653c411-d0eb-434b-9808-59c7aeee21af-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-dwgcm\" (UID: \"1653c411-d0eb-434b-9808-59c7aeee21af\") " pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.096212 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hxjm\" (UniqueName: \"kubernetes.io/projected/1653c411-d0eb-434b-9808-59c7aeee21af-kube-api-access-5hxjm\") pod \"dnsmasq-dns-78dd6ddcc-dwgcm\" (UID: \"1653c411-d0eb-434b-9808-59c7aeee21af\") " pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.096241 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9nzr\" (UniqueName: \"kubernetes.io/projected/c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83-kube-api-access-p9nzr\") pod \"dnsmasq-dns-675f4bcbfc-lgkkh\" (UID: \"c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lgkkh" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.096265 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1653c411-d0eb-434b-9808-59c7aeee21af-config\") pod \"dnsmasq-dns-78dd6ddcc-dwgcm\" (UID: \"1653c411-d0eb-434b-9808-59c7aeee21af\") " pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.097243 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1653c411-d0eb-434b-9808-59c7aeee21af-config\") pod \"dnsmasq-dns-78dd6ddcc-dwgcm\" (UID: \"1653c411-d0eb-434b-9808-59c7aeee21af\") " pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.097299 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83-config\") pod \"dnsmasq-dns-675f4bcbfc-lgkkh\" (UID: \"c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lgkkh" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.097358 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1653c411-d0eb-434b-9808-59c7aeee21af-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-dwgcm\" (UID: \"1653c411-d0eb-434b-9808-59c7aeee21af\") " pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.117353 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hxjm\" (UniqueName: 
\"kubernetes.io/projected/1653c411-d0eb-434b-9808-59c7aeee21af-kube-api-access-5hxjm\") pod \"dnsmasq-dns-78dd6ddcc-dwgcm\" (UID: \"1653c411-d0eb-434b-9808-59c7aeee21af\") " pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.122478 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9nzr\" (UniqueName: \"kubernetes.io/projected/c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83-kube-api-access-p9nzr\") pod \"dnsmasq-dns-675f4bcbfc-lgkkh\" (UID: \"c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83\") " pod="openstack/dnsmasq-dns-675f4bcbfc-lgkkh" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.130740 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-lgkkh" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.276191 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.480763 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.481164 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.481216 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.481969 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"01dcf9beb4ee4352c09c120b16a87c14a68604f09bcc07e4fe16753903887aac"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.482039 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://01dcf9beb4ee4352c09c120b16a87c14a68604f09bcc07e4fe16753903887aac" gracePeriod=600 Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.689009 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lgkkh"] Nov 28 16:30:32 crc kubenswrapper[4954]: W1128 16:30:32.943653 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1653c411_d0eb_434b_9808_59c7aeee21af.slice/crio-11dcf6ab3b449ab0d1a108b292cd2b1b661687310b432340049c581201ed555d WatchSource:0}: Error finding container 11dcf6ab3b449ab0d1a108b292cd2b1b661687310b432340049c581201ed555d: Status 404 returned error can't find the container with id 11dcf6ab3b449ab0d1a108b292cd2b1b661687310b432340049c581201ed555d Nov 28 16:30:32 crc kubenswrapper[4954]: I1128 16:30:32.948850 4954 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-dwgcm"] Nov 28 16:30:33 crc kubenswrapper[4954]: I1128 16:30:33.370323 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-lgkkh" event={"ID":"c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83","Type":"ContainerStarted","Data":"7dff3e0c49e1d31cabdcf270e0aaee5f5562e48fd15ca0053e12ffea99413b83"} Nov 28 16:30:33 crc kubenswrapper[4954]: I1128 16:30:33.371675 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" event={"ID":"1653c411-d0eb-434b-9808-59c7aeee21af","Type":"ContainerStarted","Data":"11dcf6ab3b449ab0d1a108b292cd2b1b661687310b432340049c581201ed555d"} Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.383050 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="01dcf9beb4ee4352c09c120b16a87c14a68604f09bcc07e4fe16753903887aac" exitCode=0 Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.383140 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"01dcf9beb4ee4352c09c120b16a87c14a68604f09bcc07e4fe16753903887aac"} Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.383454 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"63d17d14b7b387446e6790d2542f732de1df551f140ddd4727c4084d5078e4ad"} Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.383507 4954 scope.go:117] "RemoveContainer" containerID="81405dd859c5dce906875516afa2e66fb81aa5bf95b3268c18658c9aa3d313f9" Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.675101 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lgkkh"] Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.686054 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7s4hp"] Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.690145 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.697281 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7s4hp"] Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.837592 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d22fef9-d66b-44b7-b13b-d39bb237622d-config\") pod \"dnsmasq-dns-666b6646f7-7s4hp\" (UID: \"4d22fef9-d66b-44b7-b13b-d39bb237622d\") " pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.837702 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tct8t\" (UniqueName: \"kubernetes.io/projected/4d22fef9-d66b-44b7-b13b-d39bb237622d-kube-api-access-tct8t\") pod \"dnsmasq-dns-666b6646f7-7s4hp\" (UID: \"4d22fef9-d66b-44b7-b13b-d39bb237622d\") " pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.837738 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d22fef9-d66b-44b7-b13b-d39bb237622d-dns-svc\") pod \"dnsmasq-dns-666b6646f7-7s4hp\" (UID: \"4d22fef9-d66b-44b7-b13b-d39bb237622d\") " pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.939303 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tct8t\" (UniqueName: \"kubernetes.io/projected/4d22fef9-d66b-44b7-b13b-d39bb237622d-kube-api-access-tct8t\") pod \"dnsmasq-dns-666b6646f7-7s4hp\" (UID: \"4d22fef9-d66b-44b7-b13b-d39bb237622d\") " pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.940009 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d22fef9-d66b-44b7-b13b-d39bb237622d-dns-svc\") pod \"dnsmasq-dns-666b6646f7-7s4hp\" (UID: \"4d22fef9-d66b-44b7-b13b-d39bb237622d\") " pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.940080 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d22fef9-d66b-44b7-b13b-d39bb237622d-config\") pod \"dnsmasq-dns-666b6646f7-7s4hp\" (UID: \"4d22fef9-d66b-44b7-b13b-d39bb237622d\") " pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.940811 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d22fef9-d66b-44b7-b13b-d39bb237622d-dns-svc\") pod \"dnsmasq-dns-666b6646f7-7s4hp\" (UID: \"4d22fef9-d66b-44b7-b13b-d39bb237622d\") " pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.941763 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d22fef9-d66b-44b7-b13b-d39bb237622d-config\") pod \"dnsmasq-dns-666b6646f7-7s4hp\" (UID: \"4d22fef9-d66b-44b7-b13b-d39bb237622d\") " pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" Nov 28 16:30:34 crc kubenswrapper[4954]: I1128 16:30:34.971745 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tct8t\" (UniqueName: 
\"kubernetes.io/projected/4d22fef9-d66b-44b7-b13b-d39bb237622d-kube-api-access-tct8t\") pod \"dnsmasq-dns-666b6646f7-7s4hp\" (UID: \"4d22fef9-d66b-44b7-b13b-d39bb237622d\") " pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.019562 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-dwgcm"] Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.026150 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.045013 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-cfvbr"] Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.047617 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.060951 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-cfvbr"] Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.151823 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9m4hn\" (UniqueName: \"kubernetes.io/projected/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-kube-api-access-9m4hn\") pod \"dnsmasq-dns-57d769cc4f-cfvbr\" (UID: \"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2\") " pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.151888 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-config\") pod \"dnsmasq-dns-57d769cc4f-cfvbr\" (UID: \"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2\") " pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.151941 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-cfvbr\" (UID: \"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2\") " pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.252508 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9m4hn\" (UniqueName: \"kubernetes.io/projected/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-kube-api-access-9m4hn\") pod \"dnsmasq-dns-57d769cc4f-cfvbr\" (UID: \"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2\") " pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.252579 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-config\") pod \"dnsmasq-dns-57d769cc4f-cfvbr\" (UID: \"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2\") " pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.252618 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-cfvbr\" (UID: \"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2\") " pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.253954 4954 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-config\") pod \"dnsmasq-dns-57d769cc4f-cfvbr\" (UID: \"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2\") " pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.254003 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-cfvbr\" (UID: \"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2\") " pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.324684 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9m4hn\" (UniqueName: \"kubernetes.io/projected/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-kube-api-access-9m4hn\") pod \"dnsmasq-dns-57d769cc4f-cfvbr\" (UID: \"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2\") " pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.384782 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.789936 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7s4hp"] Nov 28 16:30:35 crc kubenswrapper[4954]: W1128 16:30:35.806479 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d22fef9_d66b_44b7_b13b_d39bb237622d.slice/crio-4b3b96c5a743dd205ac1e7db27b348a6e4804f0e6f39103b9a7047ca23c89867 WatchSource:0}: Error finding container 4b3b96c5a743dd205ac1e7db27b348a6e4804f0e6f39103b9a7047ca23c89867: Status 404 returned error can't find the container with id 4b3b96c5a743dd205ac1e7db27b348a6e4804f0e6f39103b9a7047ca23c89867 Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.825900 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.831012 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.836062 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.836643 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.836837 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.836996 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.837147 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.837293 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-49g66" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.837442 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.846037 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.852622 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-cfvbr"] Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.967448 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.967492 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.967595 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.967642 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.967668 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.967691 4954 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8a252e1a-c96a-4f98-b24e-b224fedf344c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.967793 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8a252e1a-c96a-4f98-b24e-b224fedf344c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.967990 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.968178 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.968232 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkddb\" (UniqueName: \"kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-kube-api-access-vkddb\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:35 crc kubenswrapper[4954]: I1128 16:30:35.968267 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.069316 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.069362 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8a252e1a-c96a-4f98-b24e-b224fedf344c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.069387 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8a252e1a-c96a-4f98-b24e-b224fedf344c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.069412 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.069460 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.069482 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkddb\" (UniqueName: \"kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-kube-api-access-vkddb\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.069513 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.069555 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.069573 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.069598 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.069621 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.070264 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.071144 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.071548 4954 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.072808 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.073148 4954 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.075045 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.077086 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.080063 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.087021 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8a252e1a-c96a-4f98-b24e-b224fedf344c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.089363 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8a252e1a-c96a-4f98-b24e-b224fedf344c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.109457 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkddb\" (UniqueName: \"kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-kube-api-access-vkddb\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.126943 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.128688 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.131226 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.131560 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.131685 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.131796 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.132024 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-8278r" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.132139 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.132255 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.152667 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.155248 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.165925 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.272217 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.272263 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.272290 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.272324 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ca81ef12-eb13-468e-81fc-0fdf6aba8830-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.272370 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.272395 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.272438 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.272460 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7g7mp\" (UniqueName: \"kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-kube-api-access-7g7mp\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.272482 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.272500 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ca81ef12-eb13-468e-81fc-0fdf6aba8830-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.272517 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.374142 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.374450 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.388822 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.388874 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7g7mp\" (UniqueName: \"kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-kube-api-access-7g7mp\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.388920 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.388957 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ca81ef12-eb13-468e-81fc-0fdf6aba8830-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.388979 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.389011 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.389047 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.389098 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.389129 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ca81ef12-eb13-468e-81fc-0fdf6aba8830-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.389240 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.389595 4954 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.396268 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.397280 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.397720 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.397925 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" 
(UniqueName: \"kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.398181 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.399178 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.402027 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ca81ef12-eb13-468e-81fc-0fdf6aba8830-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.402547 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ca81ef12-eb13-468e-81fc-0fdf6aba8830-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.433247 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7g7mp\" (UniqueName: \"kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-kube-api-access-7g7mp\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.440769 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" event={"ID":"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2","Type":"ContainerStarted","Data":"e84a42f7f07544191c8230b009ab73195f8e8289500a3db764e8ac994ca701bc"} Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.443115 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" event={"ID":"4d22fef9-d66b-44b7-b13b-d39bb237622d","Type":"ContainerStarted","Data":"4b3b96c5a743dd205ac1e7db27b348a6e4804f0e6f39103b9a7047ca23c89867"} Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.448007 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.517861 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:30:36 crc kubenswrapper[4954]: I1128 16:30:36.915918 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:30:36 crc kubenswrapper[4954]: W1128 16:30:36.934061 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8a252e1a_c96a_4f98_b24e_b224fedf344c.slice/crio-97b453813744c95117932caad133352dde444a5f9d42c494462fd93989d711ed WatchSource:0}: Error finding container 97b453813744c95117932caad133352dde444a5f9d42c494462fd93989d711ed: Status 404 returned error can't find the container with id 97b453813744c95117932caad133352dde444a5f9d42c494462fd93989d711ed Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.288938 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:30:37 crc kubenswrapper[4954]: W1128 16:30:37.301253 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podca81ef12_eb13_468e_81fc_0fdf6aba8830.slice/crio-ccf80cb55981316753bed96b4d01216050bda14ebc5320cd7e46d2ec96fb5337 WatchSource:0}: Error finding container ccf80cb55981316753bed96b4d01216050bda14ebc5320cd7e46d2ec96fb5337: Status 404 returned error can't find the container with id ccf80cb55981316753bed96b4d01216050bda14ebc5320cd7e46d2ec96fb5337 Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.340979 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.343082 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.345649 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.346084 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.348091 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.348493 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-5bnld" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.361699 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.368036 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.475420 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8a252e1a-c96a-4f98-b24e-b224fedf344c","Type":"ContainerStarted","Data":"97b453813744c95117932caad133352dde444a5f9d42c494462fd93989d711ed"} Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.477635 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ca81ef12-eb13-468e-81fc-0fdf6aba8830","Type":"ContainerStarted","Data":"ccf80cb55981316753bed96b4d01216050bda14ebc5320cd7e46d2ec96fb5337"} Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.508268 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.508331 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d348de7-2e67-41df-8d59-4692491ea145-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.508384 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1d348de7-2e67-41df-8d59-4692491ea145-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.508412 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.508439 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25lbv\" (UniqueName: \"kubernetes.io/projected/1d348de7-2e67-41df-8d59-4692491ea145-kube-api-access-25lbv\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.508467 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d348de7-2e67-41df-8d59-4692491ea145-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.508512 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-kolla-config\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.508641 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-config-data-default\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.612590 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d348de7-2e67-41df-8d59-4692491ea145-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.612695 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: 
\"kubernetes.io/empty-dir/1d348de7-2e67-41df-8d59-4692491ea145-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.612726 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.612758 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25lbv\" (UniqueName: \"kubernetes.io/projected/1d348de7-2e67-41df-8d59-4692491ea145-kube-api-access-25lbv\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.612813 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d348de7-2e67-41df-8d59-4692491ea145-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.612888 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-kolla-config\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.612972 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-config-data-default\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.613038 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.613584 4954 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.615710 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1d348de7-2e67-41df-8d59-4692491ea145-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.617272 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 
crc kubenswrapper[4954]: I1128 16:30:37.617925 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-kolla-config\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.618564 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-config-data-default\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.630559 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d348de7-2e67-41df-8d59-4692491ea145-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.639781 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25lbv\" (UniqueName: \"kubernetes.io/projected/1d348de7-2e67-41df-8d59-4692491ea145-kube-api-access-25lbv\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.645460 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d348de7-2e67-41df-8d59-4692491ea145-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.681446 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " pod="openstack/openstack-galera-0" Nov 28 16:30:37 crc kubenswrapper[4954]: I1128 16:30:37.691037 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.487706 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 16:30:38 crc kubenswrapper[4954]: W1128 16:30:38.513722 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1d348de7_2e67_41df_8d59_4692491ea145.slice/crio-35067c7d8aad12b2c7373fb07d737310ed05c7b84ca8d0d3a924cc92d448d14d WatchSource:0}: Error finding container 35067c7d8aad12b2c7373fb07d737310ed05c7b84ca8d0d3a924cc92d448d14d: Status 404 returned error can't find the container with id 35067c7d8aad12b2c7373fb07d737310ed05c7b84ca8d0d3a924cc92d448d14d Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.708932 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.710404 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.713015 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-85bff" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.713353 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.713679 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.713896 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.715666 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.835200 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fb4ddd8-d914-431c-a39f-28a0c6b45354-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.835398 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.835466 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2fb4ddd8-d914-431c-a39f-28a0c6b45354-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.835509 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjxqx\" (UniqueName: \"kubernetes.io/projected/2fb4ddd8-d914-431c-a39f-28a0c6b45354-kube-api-access-rjxqx\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.835575 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.835631 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.835668 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/2fb4ddd8-d914-431c-a39f-28a0c6b45354-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.835712 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.937100 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fb4ddd8-d914-431c-a39f-28a0c6b45354-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.937138 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.937165 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2fb4ddd8-d914-431c-a39f-28a0c6b45354-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.937193 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjxqx\" (UniqueName: \"kubernetes.io/projected/2fb4ddd8-d914-431c-a39f-28a0c6b45354-kube-api-access-rjxqx\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.937213 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.937240 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.937262 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fb4ddd8-d914-431c-a39f-28a0c6b45354-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.937278 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: 
\"kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.938009 4954 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.938206 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.938393 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2fb4ddd8-d914-431c-a39f-28a0c6b45354-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.939032 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.939250 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.945055 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fb4ddd8-d914-431c-a39f-28a0c6b45354-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.959507 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjxqx\" (UniqueName: \"kubernetes.io/projected/2fb4ddd8-d914-431c-a39f-28a0c6b45354-kube-api-access-rjxqx\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.963658 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fb4ddd8-d914-431c-a39f-28a0c6b45354-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.966227 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: 
\"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.976956 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.984433 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.989242 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-xlr8c" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.989478 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 28 16:30:38 crc kubenswrapper[4954]: I1128 16:30:38.989684 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.010407 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.037176 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.139286 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afe4302a-413f-48e1-90a9-3f1178e5c6f7-combined-ca-bundle\") pod \"memcached-0\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.139336 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/afe4302a-413f-48e1-90a9-3f1178e5c6f7-config-data\") pod \"memcached-0\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.139392 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/afe4302a-413f-48e1-90a9-3f1178e5c6f7-kolla-config\") pod \"memcached-0\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.139412 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/afe4302a-413f-48e1-90a9-3f1178e5c6f7-memcached-tls-certs\") pod \"memcached-0\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.139438 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbnrh\" (UniqueName: \"kubernetes.io/projected/afe4302a-413f-48e1-90a9-3f1178e5c6f7-kube-api-access-jbnrh\") pod \"memcached-0\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.244171 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afe4302a-413f-48e1-90a9-3f1178e5c6f7-combined-ca-bundle\") pod \"memcached-0\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.244222 4954 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/afe4302a-413f-48e1-90a9-3f1178e5c6f7-config-data\") pod \"memcached-0\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.244310 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/afe4302a-413f-48e1-90a9-3f1178e5c6f7-kolla-config\") pod \"memcached-0\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.244335 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/afe4302a-413f-48e1-90a9-3f1178e5c6f7-memcached-tls-certs\") pod \"memcached-0\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.244355 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbnrh\" (UniqueName: \"kubernetes.io/projected/afe4302a-413f-48e1-90a9-3f1178e5c6f7-kube-api-access-jbnrh\") pod \"memcached-0\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.247831 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/afe4302a-413f-48e1-90a9-3f1178e5c6f7-config-data\") pod \"memcached-0\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.248816 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/afe4302a-413f-48e1-90a9-3f1178e5c6f7-kolla-config\") pod \"memcached-0\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.252827 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/afe4302a-413f-48e1-90a9-3f1178e5c6f7-memcached-tls-certs\") pod \"memcached-0\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.252970 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afe4302a-413f-48e1-90a9-3f1178e5c6f7-combined-ca-bundle\") pod \"memcached-0\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.262822 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbnrh\" (UniqueName: \"kubernetes.io/projected/afe4302a-413f-48e1-90a9-3f1178e5c6f7-kube-api-access-jbnrh\") pod \"memcached-0\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.344697 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.598604 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1d348de7-2e67-41df-8d59-4692491ea145","Type":"ContainerStarted","Data":"35067c7d8aad12b2c7373fb07d737310ed05c7b84ca8d0d3a924cc92d448d14d"} Nov 28 16:30:39 crc kubenswrapper[4954]: I1128 16:30:39.899430 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 16:30:40 crc kubenswrapper[4954]: I1128 16:30:40.257505 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 16:30:40 crc kubenswrapper[4954]: I1128 16:30:40.621834 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2fb4ddd8-d914-431c-a39f-28a0c6b45354","Type":"ContainerStarted","Data":"970dfc526f56fecb685934a29eb459b08bf4eab23eab54c39b13b30034bbd8ed"} Nov 28 16:30:40 crc kubenswrapper[4954]: I1128 16:30:40.634364 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"afe4302a-413f-48e1-90a9-3f1178e5c6f7","Type":"ContainerStarted","Data":"d87151f70a2a333463796e25a28edf09ad345c5c289f9c1ca91b7366806f6f6f"} Nov 28 16:30:41 crc kubenswrapper[4954]: I1128 16:30:41.034858 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:30:41 crc kubenswrapper[4954]: I1128 16:30:41.036233 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 16:30:41 crc kubenswrapper[4954]: I1128 16:30:41.042977 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-8lr45" Nov 28 16:30:41 crc kubenswrapper[4954]: I1128 16:30:41.046845 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:30:41 crc kubenswrapper[4954]: I1128 16:30:41.193259 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7b4df\" (UniqueName: \"kubernetes.io/projected/dc71f947-acc3-4867-8e70-0f2def81dc03-kube-api-access-7b4df\") pod \"kube-state-metrics-0\" (UID: \"dc71f947-acc3-4867-8e70-0f2def81dc03\") " pod="openstack/kube-state-metrics-0" Nov 28 16:30:41 crc kubenswrapper[4954]: I1128 16:30:41.294855 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7b4df\" (UniqueName: \"kubernetes.io/projected/dc71f947-acc3-4867-8e70-0f2def81dc03-kube-api-access-7b4df\") pod \"kube-state-metrics-0\" (UID: \"dc71f947-acc3-4867-8e70-0f2def81dc03\") " pod="openstack/kube-state-metrics-0" Nov 28 16:30:41 crc kubenswrapper[4954]: I1128 16:30:41.314198 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7b4df\" (UniqueName: \"kubernetes.io/projected/dc71f947-acc3-4867-8e70-0f2def81dc03-kube-api-access-7b4df\") pod \"kube-state-metrics-0\" (UID: \"dc71f947-acc3-4867-8e70-0f2def81dc03\") " pod="openstack/kube-state-metrics-0" Nov 28 16:30:41 crc kubenswrapper[4954]: I1128 16:30:41.359076 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 16:30:42 crc kubenswrapper[4954]: I1128 16:30:42.058308 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.726372 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-vmnzt"] Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.729902 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.732136 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.732792 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-msc4r" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.733119 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.740123 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vmnzt"] Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.746363 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-d4vfs"] Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.748073 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.755431 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-d4vfs"] Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.851553 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-run\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.851629 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-run-ovn\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.851662 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-lib\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.851772 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-combined-ca-bundle\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.851799 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-log\") pod 
\"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.851841 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-ovn-controller-tls-certs\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.851858 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-scripts\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.851884 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-log-ovn\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.851925 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctpg7\" (UniqueName: \"kubernetes.io/projected/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-kube-api-access-ctpg7\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.851951 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-etc-ovs\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.851966 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c94w8\" (UniqueName: \"kubernetes.io/projected/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-kube-api-access-c94w8\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.851984 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-scripts\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.852294 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-run\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.959697 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-scripts\") pod \"ovn-controller-ovs-d4vfs\" (UID: 
\"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.959784 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-ovn-controller-tls-certs\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.959816 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-log-ovn\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.959834 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctpg7\" (UniqueName: \"kubernetes.io/projected/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-kube-api-access-ctpg7\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.959866 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-etc-ovs\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.959882 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-scripts\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.959897 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c94w8\" (UniqueName: \"kubernetes.io/projected/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-kube-api-access-c94w8\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.959941 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-run\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.959961 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-run\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.960000 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-run-ovn\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.960044 4954 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-lib\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.960116 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-combined-ca-bundle\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.960146 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-log\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.960907 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-log\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.961096 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-run\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.962650 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-scripts\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.962912 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-run\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.963062 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-run-ovn\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.963884 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-lib\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.964438 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-etc-ovs\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:44 crc 
kubenswrapper[4954]: I1128 16:30:44.964476 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-log-ovn\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.970764 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-scripts\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.985140 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-ovn-controller-tls-certs\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.985759 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-combined-ca-bundle\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.992704 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctpg7\" (UniqueName: \"kubernetes.io/projected/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-kube-api-access-ctpg7\") pod \"ovn-controller-vmnzt\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:44 crc kubenswrapper[4954]: I1128 16:30:44.995032 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c94w8\" (UniqueName: \"kubernetes.io/projected/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-kube-api-access-c94w8\") pod \"ovn-controller-ovs-d4vfs\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.091457 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vmnzt" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.105675 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.287810 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.291763 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.297145 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.297458 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-7pbrc" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.297622 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.298024 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.298079 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.308262 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.365438 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.365577 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.365633 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/892117aa-d306-4a1d-bf6e-b203b6337537-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.365661 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/892117aa-d306-4a1d-bf6e-b203b6337537-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.365715 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.365768 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/892117aa-d306-4a1d-bf6e-b203b6337537-config\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.365789 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfhnx\" (UniqueName: 
\"kubernetes.io/projected/892117aa-d306-4a1d-bf6e-b203b6337537-kube-api-access-xfhnx\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.365818 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.467610 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/892117aa-d306-4a1d-bf6e-b203b6337537-config\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.467760 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfhnx\" (UniqueName: \"kubernetes.io/projected/892117aa-d306-4a1d-bf6e-b203b6337537-kube-api-access-xfhnx\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.467790 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.467828 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.467852 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.468241 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/892117aa-d306-4a1d-bf6e-b203b6337537-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.468263 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/892117aa-d306-4a1d-bf6e-b203b6337537-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.468303 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 
16:30:45.468778 4954 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.472570 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/892117aa-d306-4a1d-bf6e-b203b6337537-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.473118 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/892117aa-d306-4a1d-bf6e-b203b6337537-config\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.473747 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/892117aa-d306-4a1d-bf6e-b203b6337537-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.478120 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.479082 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.490183 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.493078 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.500694 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfhnx\" (UniqueName: \"kubernetes.io/projected/892117aa-d306-4a1d-bf6e-b203b6337537-kube-api-access-xfhnx\") pod \"ovsdbserver-nb-0\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:45 crc kubenswrapper[4954]: I1128 16:30:45.609120 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.501252 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.506773 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.511494 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.512123 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.512234 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.512516 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-d45qp" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.514713 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.602220 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.602652 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.602723 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.603029 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jx85p\" (UniqueName: \"kubernetes.io/projected/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-kube-api-access-jx85p\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.603273 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.603446 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc 
kubenswrapper[4954]: I1128 16:30:47.603630 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-config\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.603671 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.705805 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.705866 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.705894 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.705939 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jx85p\" (UniqueName: \"kubernetes.io/projected/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-kube-api-access-jx85p\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.705987 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.706027 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.706066 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-config\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.706120 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-metrics-certs-tls-certs\") 
pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.707142 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.707754 4954 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.709098 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.710051 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-config\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.714135 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.719651 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.720158 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"dc71f947-acc3-4867-8e70-0f2def81dc03","Type":"ContainerStarted","Data":"a70560ccbf5fff039847fd4be765f18e26d087a622c00f712f67b08b6abfd945"} Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.724662 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.725001 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jx85p\" (UniqueName: \"kubernetes.io/projected/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-kube-api-access-jx85p\") pod \"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.731043 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod 
\"ovsdbserver-sb-0\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:47 crc kubenswrapper[4954]: I1128 16:30:47.831672 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 16:30:53 crc kubenswrapper[4954]: E1128 16:30:53.267016 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 28 16:30:53 crc kubenswrapper[4954]: E1128 16:30:53.268254 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7g7mp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(ca81ef12-eb13-468e-81fc-0fdf6aba8830): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:30:53 crc kubenswrapper[4954]: E1128 16:30:53.270063 4954 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="ca81ef12-eb13-468e-81fc-0fdf6aba8830" Nov 28 16:30:53 crc kubenswrapper[4954]: E1128 16:30:53.767772 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="ca81ef12-eb13-468e-81fc-0fdf6aba8830" Nov 28 16:31:05 crc kubenswrapper[4954]: E1128 16:31:05.406589 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 28 16:31:05 crc kubenswrapper[4954]: E1128 16:31:05.407304 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rjxqx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(2fb4ddd8-d914-431c-a39f-28a0c6b45354): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:31:05 crc kubenswrapper[4954]: E1128 16:31:05.408459 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying 
config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="2fb4ddd8-d914-431c-a39f-28a0c6b45354" Nov 28 16:31:05 crc kubenswrapper[4954]: E1128 16:31:05.414589 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 28 16:31:05 crc kubenswrapper[4954]: E1128 16:31:05.414741 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-25lbv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(1d348de7-2e67-41df-8d59-4692491ea145): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:31:05 crc kubenswrapper[4954]: E1128 16:31:05.415827 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="1d348de7-2e67-41df-8d59-4692491ea145" Nov 28 16:31:05 crc kubenswrapper[4954]: E1128 16:31:05.425144 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 28 16:31:05 crc kubenswrapper[4954]: E1128 16:31:05.425615 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vkddb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(8a252e1a-c96a-4f98-b24e-b224fedf344c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:31:05 crc kubenswrapper[4954]: E1128 16:31:05.426713 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="8a252e1a-c96a-4f98-b24e-b224fedf344c" Nov 28 16:31:05 crc kubenswrapper[4954]: E1128 16:31:05.882861 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="8a252e1a-c96a-4f98-b24e-b224fedf344c" Nov 28 16:31:05 crc 
kubenswrapper[4954]: E1128 16:31:05.883781 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="2fb4ddd8-d914-431c-a39f-28a0c6b45354" Nov 28 16:31:05 crc kubenswrapper[4954]: E1128 16:31:05.887163 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="1d348de7-2e67-41df-8d59-4692491ea145" Nov 28 16:31:05 crc kubenswrapper[4954]: I1128 16:31:05.899885 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vmnzt"] Nov 28 16:31:11 crc kubenswrapper[4954]: E1128 16:31:11.185202 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Nov 28 16:31:11 crc kubenswrapper[4954]: E1128 16:31:11.185917 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n5hd8h686h5d8h4h57bh55h575h5d7h5f7hdh75h5d8h698h655hd4h679h94h75h65fh548h97h67dhbfh59h59h5fch696h5ffh5f4h589h658q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jbnrh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 
},Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(afe4302a-413f-48e1-90a9-3f1178e5c6f7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:31:11 crc kubenswrapper[4954]: E1128 16:31:11.187291 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="afe4302a-413f-48e1-90a9-3f1178e5c6f7" Nov 28 16:31:11 crc kubenswrapper[4954]: I1128 16:31:11.923865 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vmnzt" event={"ID":"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1","Type":"ContainerStarted","Data":"06a79d8e20c952b6c20c1e9bde76dd0728e40021af86a13f5f7b4e4a3667a679"} Nov 28 16:31:11 crc kubenswrapper[4954]: E1128 16:31:11.925721 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="afe4302a-413f-48e1-90a9-3f1178e5c6f7" Nov 28 16:31:12 crc kubenswrapper[4954]: E1128 16:31:12.922326 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 16:31:12 crc kubenswrapper[4954]: E1128 16:31:12.922672 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5hxjm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-dwgcm_openstack(1653c411-d0eb-434b-9808-59c7aeee21af): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:31:12 crc kubenswrapper[4954]: E1128 16:31:12.924033 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" podUID="1653c411-d0eb-434b-9808-59c7aeee21af" Nov 28 16:31:12 crc kubenswrapper[4954]: E1128 16:31:12.968013 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 16:31:12 crc kubenswrapper[4954]: E1128 16:31:12.968235 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p9nzr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-lgkkh_openstack(c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:31:12 crc kubenswrapper[4954]: E1128 16:31:12.969368 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-lgkkh" podUID="c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83" Nov 28 16:31:13 crc kubenswrapper[4954]: E1128 16:31:13.500008 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying layer: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 28 16:31:13 crc kubenswrapper[4954]: E1128 16:31:13.500059 4954 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying layer: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 28 16:31:13 crc kubenswrapper[4954]: E1128 16:31:13.500200 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods 
--namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7b4df,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(dc71f947-acc3-4867-8e70-0f2def81dc03): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying layer: context canceled" logger="UnhandledError" Nov 28 16:31:13 crc kubenswrapper[4954]: E1128 16:31:13.501379 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying layer: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="dc71f947-acc3-4867-8e70-0f2def81dc03" Nov 28 16:31:13 crc kubenswrapper[4954]: E1128 16:31:13.522103 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 16:31:13 crc kubenswrapper[4954]: E1128 16:31:13.522358 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9m4hn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-cfvbr_openstack(f5a5a42d-2215-4bcb-a907-23a25bc6d5e2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:31:13 crc kubenswrapper[4954]: E1128 16:31:13.523587 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" podUID="f5a5a42d-2215-4bcb-a907-23a25bc6d5e2" Nov 28 16:31:13 crc kubenswrapper[4954]: I1128 16:31:13.606249 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 16:31:13 crc kubenswrapper[4954]: E1128 16:31:13.643680 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 16:31:13 crc kubenswrapper[4954]: E1128 16:31:13.643854 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tct8t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-7s4hp_openstack(4d22fef9-d66b-44b7-b13b-d39bb237622d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:31:13 crc kubenswrapper[4954]: E1128 16:31:13.645034 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" podUID="4d22fef9-d66b-44b7-b13b-d39bb237622d" Nov 28 16:31:13 crc kubenswrapper[4954]: I1128 16:31:13.944934 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6","Type":"ContainerStarted","Data":"44938311a6c954ceaa3fadd03c845344f2a50768d0efc651b5be5a5e7f8b8366"} Nov 28 16:31:13 crc kubenswrapper[4954]: E1128 16:31:13.947486 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" podUID="4d22fef9-d66b-44b7-b13b-d39bb237622d" Nov 28 16:31:13 crc kubenswrapper[4954]: E1128 16:31:13.947597 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" podUID="f5a5a42d-2215-4bcb-a907-23a25bc6d5e2" Nov 28 16:31:13 crc kubenswrapper[4954]: E1128 16:31:13.948397 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling 
image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="dc71f947-acc3-4867-8e70-0f2def81dc03" Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.192401 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.311261 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-d4vfs"] Nov 28 16:31:14 crc kubenswrapper[4954]: W1128 16:31:14.313442 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2f08fb1c_8d18_4b77_bbaf_adcf2c5bc2a8.slice/crio-47b82663b31676ac614d5a632c30543c1167fc1670b1affa47084d09aabd00bf WatchSource:0}: Error finding container 47b82663b31676ac614d5a632c30543c1167fc1670b1affa47084d09aabd00bf: Status 404 returned error can't find the container with id 47b82663b31676ac614d5a632c30543c1167fc1670b1affa47084d09aabd00bf Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.456983 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.466608 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-lgkkh" Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.612354 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1653c411-d0eb-434b-9808-59c7aeee21af-dns-svc\") pod \"1653c411-d0eb-434b-9808-59c7aeee21af\" (UID: \"1653c411-d0eb-434b-9808-59c7aeee21af\") " Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.612507 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9nzr\" (UniqueName: \"kubernetes.io/projected/c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83-kube-api-access-p9nzr\") pod \"c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83\" (UID: \"c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83\") " Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.612558 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hxjm\" (UniqueName: \"kubernetes.io/projected/1653c411-d0eb-434b-9808-59c7aeee21af-kube-api-access-5hxjm\") pod \"1653c411-d0eb-434b-9808-59c7aeee21af\" (UID: \"1653c411-d0eb-434b-9808-59c7aeee21af\") " Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.613409 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83-config" (OuterVolumeSpecName: "config") pod "c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83" (UID: "c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.613459 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1653c411-d0eb-434b-9808-59c7aeee21af-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1653c411-d0eb-434b-9808-59c7aeee21af" (UID: "1653c411-d0eb-434b-9808-59c7aeee21af"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.613542 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83-config\") pod \"c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83\" (UID: \"c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83\") " Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.613606 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1653c411-d0eb-434b-9808-59c7aeee21af-config\") pod \"1653c411-d0eb-434b-9808-59c7aeee21af\" (UID: \"1653c411-d0eb-434b-9808-59c7aeee21af\") " Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.613999 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.614019 4954 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1653c411-d0eb-434b-9808-59c7aeee21af-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.614160 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1653c411-d0eb-434b-9808-59c7aeee21af-config" (OuterVolumeSpecName: "config") pod "1653c411-d0eb-434b-9808-59c7aeee21af" (UID: "1653c411-d0eb-434b-9808-59c7aeee21af"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.619124 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1653c411-d0eb-434b-9808-59c7aeee21af-kube-api-access-5hxjm" (OuterVolumeSpecName: "kube-api-access-5hxjm") pod "1653c411-d0eb-434b-9808-59c7aeee21af" (UID: "1653c411-d0eb-434b-9808-59c7aeee21af"). InnerVolumeSpecName "kube-api-access-5hxjm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.619901 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83-kube-api-access-p9nzr" (OuterVolumeSpecName: "kube-api-access-p9nzr") pod "c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83" (UID: "c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83"). InnerVolumeSpecName "kube-api-access-p9nzr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.716046 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9nzr\" (UniqueName: \"kubernetes.io/projected/c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83-kube-api-access-p9nzr\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.716084 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hxjm\" (UniqueName: \"kubernetes.io/projected/1653c411-d0eb-434b-9808-59c7aeee21af-kube-api-access-5hxjm\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.716094 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1653c411-d0eb-434b-9808-59c7aeee21af-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.954853 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"892117aa-d306-4a1d-bf6e-b203b6337537","Type":"ContainerStarted","Data":"53a7f3f169542d2a66c28107a568f2f29f80883acfd4ac2d4aa6255e68262080"} Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.956701 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-d4vfs" event={"ID":"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8","Type":"ContainerStarted","Data":"47b82663b31676ac614d5a632c30543c1167fc1670b1affa47084d09aabd00bf"} Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.959475 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ca81ef12-eb13-468e-81fc-0fdf6aba8830","Type":"ContainerStarted","Data":"edef8b1b7915e1f7d6f114baa465a89b8aaf40344d1baefb410030093e2aaa7b"} Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.961033 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.961053 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-dwgcm" event={"ID":"1653c411-d0eb-434b-9808-59c7aeee21af","Type":"ContainerDied","Data":"11dcf6ab3b449ab0d1a108b292cd2b1b661687310b432340049c581201ed555d"} Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.963099 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-lgkkh" event={"ID":"c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83","Type":"ContainerDied","Data":"7dff3e0c49e1d31cabdcf270e0aaee5f5562e48fd15ca0053e12ffea99413b83"} Nov 28 16:31:14 crc kubenswrapper[4954]: I1128 16:31:14.963132 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-lgkkh" Nov 28 16:31:15 crc kubenswrapper[4954]: I1128 16:31:15.029080 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lgkkh"] Nov 28 16:31:15 crc kubenswrapper[4954]: I1128 16:31:15.034994 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-lgkkh"] Nov 28 16:31:15 crc kubenswrapper[4954]: I1128 16:31:15.055846 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-dwgcm"] Nov 28 16:31:15 crc kubenswrapper[4954]: I1128 16:31:15.063268 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-dwgcm"] Nov 28 16:31:15 crc kubenswrapper[4954]: I1128 16:31:15.869558 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1653c411-d0eb-434b-9808-59c7aeee21af" path="/var/lib/kubelet/pods/1653c411-d0eb-434b-9808-59c7aeee21af/volumes" Nov 28 16:31:15 crc kubenswrapper[4954]: I1128 16:31:15.870219 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83" path="/var/lib/kubelet/pods/c3f1bbaf-dbfc-4ba1-8e98-165ce2ec5c83/volumes" Nov 28 16:31:17 crc kubenswrapper[4954]: I1128 16:31:17.985945 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2fb4ddd8-d914-431c-a39f-28a0c6b45354","Type":"ContainerStarted","Data":"a96f76202555a162e9e08bfee3b503efcbf0bf4e6bb65e95750633f6a100b513"} Nov 28 16:31:17 crc kubenswrapper[4954]: I1128 16:31:17.988964 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vmnzt" event={"ID":"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1","Type":"ContainerStarted","Data":"f4db01dc531cb4daa493907624fc94d1b1d3c77f1683d34c548a3b5fd3be9326"} Nov 28 16:31:17 crc kubenswrapper[4954]: I1128 16:31:17.989453 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-vmnzt" Nov 28 16:31:17 crc kubenswrapper[4954]: I1128 16:31:17.991449 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6","Type":"ContainerStarted","Data":"a8434b10b60eb2a163aaaf4ff3727ed2fe9014b9cf717f4ce97fcc65af984b89"} Nov 28 16:31:17 crc kubenswrapper[4954]: I1128 16:31:17.992516 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"892117aa-d306-4a1d-bf6e-b203b6337537","Type":"ContainerStarted","Data":"107051ce6919142522558805719fa4abac53f52c0b68b1e403a6ef8355295c53"} Nov 28 16:31:17 crc kubenswrapper[4954]: I1128 16:31:17.993461 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-d4vfs" event={"ID":"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8","Type":"ContainerStarted","Data":"9f05ff47002e77f6711c2164ddba94188c5ecac38bf234177c69c0542ddbe01f"} Nov 28 16:31:18 crc kubenswrapper[4954]: I1128 16:31:18.054093 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-vmnzt" podStartSLOduration=27.717514355 podStartE2EDuration="34.054071784s" podCreationTimestamp="2025-11-28 16:30:44 +0000 UTC" firstStartedPulling="2025-11-28 16:31:11.201129733 +0000 UTC m=+1224.592798274" lastFinishedPulling="2025-11-28 16:31:17.537687162 +0000 UTC m=+1230.929355703" observedRunningTime="2025-11-28 16:31:18.050827782 +0000 UTC m=+1231.442496333" watchObservedRunningTime="2025-11-28 16:31:18.054071784 +0000 UTC m=+1231.445740325" Nov 28 
16:31:19 crc kubenswrapper[4954]: I1128 16:31:19.004673 4954 generic.go:334] "Generic (PLEG): container finished" podID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerID="9f05ff47002e77f6711c2164ddba94188c5ecac38bf234177c69c0542ddbe01f" exitCode=0 Nov 28 16:31:19 crc kubenswrapper[4954]: I1128 16:31:19.004784 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-d4vfs" event={"ID":"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8","Type":"ContainerDied","Data":"9f05ff47002e77f6711c2164ddba94188c5ecac38bf234177c69c0542ddbe01f"} Nov 28 16:31:20 crc kubenswrapper[4954]: I1128 16:31:20.017049 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-d4vfs" event={"ID":"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8","Type":"ContainerStarted","Data":"1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90"} Nov 28 16:31:21 crc kubenswrapper[4954]: I1128 16:31:21.026088 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-d4vfs" event={"ID":"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8","Type":"ContainerStarted","Data":"7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc"} Nov 28 16:31:21 crc kubenswrapper[4954]: I1128 16:31:21.026501 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:31:21 crc kubenswrapper[4954]: I1128 16:31:21.026542 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:31:21 crc kubenswrapper[4954]: I1128 16:31:21.051732 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-d4vfs" podStartSLOduration=33.886801542 podStartE2EDuration="37.051707677s" podCreationTimestamp="2025-11-28 16:30:44 +0000 UTC" firstStartedPulling="2025-11-28 16:31:14.31541552 +0000 UTC m=+1227.707084061" lastFinishedPulling="2025-11-28 16:31:17.480321655 +0000 UTC m=+1230.871990196" observedRunningTime="2025-11-28 16:31:21.046836414 +0000 UTC m=+1234.438504965" watchObservedRunningTime="2025-11-28 16:31:21.051707677 +0000 UTC m=+1234.443376218" Nov 28 16:31:22 crc kubenswrapper[4954]: I1128 16:31:22.033876 4954 generic.go:334] "Generic (PLEG): container finished" podID="2fb4ddd8-d914-431c-a39f-28a0c6b45354" containerID="a96f76202555a162e9e08bfee3b503efcbf0bf4e6bb65e95750633f6a100b513" exitCode=0 Nov 28 16:31:22 crc kubenswrapper[4954]: I1128 16:31:22.033981 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2fb4ddd8-d914-431c-a39f-28a0c6b45354","Type":"ContainerDied","Data":"a96f76202555a162e9e08bfee3b503efcbf0bf4e6bb65e95750633f6a100b513"} Nov 28 16:31:22 crc kubenswrapper[4954]: I1128 16:31:22.037987 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1d348de7-2e67-41df-8d59-4692491ea145","Type":"ContainerStarted","Data":"8e8430765fa780e78273b20891494975f648a250228f50c0bf9c943516e4e664"} Nov 28 16:31:22 crc kubenswrapper[4954]: I1128 16:31:22.040260 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6","Type":"ContainerStarted","Data":"08f7109509c86b6eda033eb417ad90a9b3211cb608828eb97354929fe65260e4"} Nov 28 16:31:22 crc kubenswrapper[4954]: I1128 16:31:22.042140 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" 
event={"ID":"892117aa-d306-4a1d-bf6e-b203b6337537","Type":"ContainerStarted","Data":"0bc97c121bcab5b752053d9d791ed5b026bc09edb0e37b0c2ab456c187dcfed8"} Nov 28 16:31:22 crc kubenswrapper[4954]: I1128 16:31:22.077351 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=31.514893418 podStartE2EDuration="38.077308355s" podCreationTimestamp="2025-11-28 16:30:44 +0000 UTC" firstStartedPulling="2025-11-28 16:31:14.286276017 +0000 UTC m=+1227.677944558" lastFinishedPulling="2025-11-28 16:31:20.848690954 +0000 UTC m=+1234.240359495" observedRunningTime="2025-11-28 16:31:22.072941957 +0000 UTC m=+1235.464610488" watchObservedRunningTime="2025-11-28 16:31:22.077308355 +0000 UTC m=+1235.468976886" Nov 28 16:31:22 crc kubenswrapper[4954]: I1128 16:31:22.116252 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=28.905642956 podStartE2EDuration="36.116235414s" podCreationTimestamp="2025-11-28 16:30:46 +0000 UTC" firstStartedPulling="2025-11-28 16:31:13.610656866 +0000 UTC m=+1227.002325407" lastFinishedPulling="2025-11-28 16:31:20.821249324 +0000 UTC m=+1234.212917865" observedRunningTime="2025-11-28 16:31:22.110768283 +0000 UTC m=+1235.502436824" watchObservedRunningTime="2025-11-28 16:31:22.116235414 +0000 UTC m=+1235.507903955" Nov 28 16:31:22 crc kubenswrapper[4954]: I1128 16:31:22.832806 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 28 16:31:23 crc kubenswrapper[4954]: I1128 16:31:23.050636 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8a252e1a-c96a-4f98-b24e-b224fedf344c","Type":"ContainerStarted","Data":"350d2239e212031027828edd8c6bccb2046da3c9a507517a93cf98dffefd49cd"} Nov 28 16:31:23 crc kubenswrapper[4954]: I1128 16:31:23.052966 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2fb4ddd8-d914-431c-a39f-28a0c6b45354","Type":"ContainerStarted","Data":"352627ac7579bc056309216860ed790605608d2615b1a1c40f0c28e291d87c99"} Nov 28 16:31:23 crc kubenswrapper[4954]: I1128 16:31:23.832813 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 28 16:31:23 crc kubenswrapper[4954]: I1128 16:31:23.877153 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=9.262948216 podStartE2EDuration="46.877108212s" podCreationTimestamp="2025-11-28 16:30:37 +0000 UTC" firstStartedPulling="2025-11-28 16:30:39.927585734 +0000 UTC m=+1193.319254275" lastFinishedPulling="2025-11-28 16:31:17.54174573 +0000 UTC m=+1230.933414271" observedRunningTime="2025-11-28 16:31:23.103010995 +0000 UTC m=+1236.494679536" watchObservedRunningTime="2025-11-28 16:31:23.877108212 +0000 UTC m=+1237.268776753" Nov 28 16:31:23 crc kubenswrapper[4954]: I1128 16:31:23.883363 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.113100 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.428110 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-cfvbr"] Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.456558 4954 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-5gr26"] Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.457786 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.472137 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-5gr26"] Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.479300 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.493131 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-55p57"] Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.494290 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.495872 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.498888 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-55p57"] Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.523395 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-config\") pod \"dnsmasq-dns-6bc7876d45-5gr26\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") " pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.523459 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpjrr\" (UniqueName: \"kubernetes.io/projected/f5bedb6d-6351-4fdd-be82-103054905458-kube-api-access-qpjrr\") pod \"dnsmasq-dns-6bc7876d45-5gr26\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") " pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.523840 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-5gr26\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") " pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.523969 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-5gr26\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") " pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.610198 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.625778 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.625833 4954 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-ovs-rundir\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.625986 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-config\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.626064 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-config\") pod \"dnsmasq-dns-6bc7876d45-5gr26\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") " pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.626101 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpjrr\" (UniqueName: \"kubernetes.io/projected/f5bedb6d-6351-4fdd-be82-103054905458-kube-api-access-qpjrr\") pod \"dnsmasq-dns-6bc7876d45-5gr26\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") " pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.626173 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-ovn-rundir\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.626293 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgb9w\" (UniqueName: \"kubernetes.io/projected/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-kube-api-access-jgb9w\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.626388 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-5gr26\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") " pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.626438 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-combined-ca-bundle\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.626516 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-5gr26\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") " pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 
16:31:24.627032 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-config\") pod \"dnsmasq-dns-6bc7876d45-5gr26\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") " pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.627201 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-5gr26\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") " pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.627391 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-5gr26\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") " pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.657223 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpjrr\" (UniqueName: \"kubernetes.io/projected/f5bedb6d-6351-4fdd-be82-103054905458-kube-api-access-qpjrr\") pod \"dnsmasq-dns-6bc7876d45-5gr26\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") " pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.688860 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7s4hp"] Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.728072 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgb9w\" (UniqueName: \"kubernetes.io/projected/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-kube-api-access-jgb9w\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.728153 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-combined-ca-bundle\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.728196 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.728213 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-ovs-rundir\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.728240 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-config\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " 
pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.728303 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-ovn-rundir\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.729294 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-config\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.729323 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-ovs-rundir\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.729393 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-ovn-rundir\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.731985 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.736016 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-combined-ca-bundle\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.748113 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgb9w\" (UniqueName: \"kubernetes.io/projected/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-kube-api-access-jgb9w\") pod \"ovn-controller-metrics-55p57\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.748190 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-2cpmm"] Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.749776 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.752264 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.754333 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-2cpmm"] Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.794310 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.813600 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.829773 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-2cpmm\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.829884 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-2cpmm\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.829957 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hr5fz\" (UniqueName: \"kubernetes.io/projected/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-kube-api-access-hr5fz\") pod \"dnsmasq-dns-8554648995-2cpmm\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.830016 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-dns-svc\") pod \"dnsmasq-dns-8554648995-2cpmm\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.830043 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-config\") pod \"dnsmasq-dns-8554648995-2cpmm\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:24 crc kubenswrapper[4954]: I1128 16:31:24.866683 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.023486 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-dns-svc\") pod \"dnsmasq-dns-8554648995-2cpmm\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.023572 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-config\") pod \"dnsmasq-dns-8554648995-2cpmm\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.023737 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-2cpmm\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.023871 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-2cpmm\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.023912 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hr5fz\" (UniqueName: \"kubernetes.io/projected/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-kube-api-access-hr5fz\") pod \"dnsmasq-dns-8554648995-2cpmm\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.024980 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-dns-svc\") pod \"dnsmasq-dns-8554648995-2cpmm\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.028538 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-config\") pod \"dnsmasq-dns-8554648995-2cpmm\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.028828 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-2cpmm\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.032043 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-2cpmm\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 
16:31:25.057885 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hr5fz\" (UniqueName: \"kubernetes.io/projected/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-kube-api-access-hr5fz\") pod \"dnsmasq-dns-8554648995-2cpmm\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.109737 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.154410 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.171010 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.289091 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.323642 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.333486 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9m4hn\" (UniqueName: \"kubernetes.io/projected/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-kube-api-access-9m4hn\") pod \"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2\" (UID: \"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2\") " Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.333667 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-dns-svc\") pod \"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2\" (UID: \"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2\") " Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.333734 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-config\") pod \"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2\" (UID: \"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2\") " Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.335990 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-config" (OuterVolumeSpecName: "config") pod "f5a5a42d-2215-4bcb-a907-23a25bc6d5e2" (UID: "f5a5a42d-2215-4bcb-a907-23a25bc6d5e2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.342607 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f5a5a42d-2215-4bcb-a907-23a25bc6d5e2" (UID: "f5a5a42d-2215-4bcb-a907-23a25bc6d5e2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.348206 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-kube-api-access-9m4hn" (OuterVolumeSpecName: "kube-api-access-9m4hn") pod "f5a5a42d-2215-4bcb-a907-23a25bc6d5e2" (UID: "f5a5a42d-2215-4bcb-a907-23a25bc6d5e2"). InnerVolumeSpecName "kube-api-access-9m4hn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.436588 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tct8t\" (UniqueName: \"kubernetes.io/projected/4d22fef9-d66b-44b7-b13b-d39bb237622d-kube-api-access-tct8t\") pod \"4d22fef9-d66b-44b7-b13b-d39bb237622d\" (UID: \"4d22fef9-d66b-44b7-b13b-d39bb237622d\") " Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.436663 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d22fef9-d66b-44b7-b13b-d39bb237622d-dns-svc\") pod \"4d22fef9-d66b-44b7-b13b-d39bb237622d\" (UID: \"4d22fef9-d66b-44b7-b13b-d39bb237622d\") " Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.436738 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d22fef9-d66b-44b7-b13b-d39bb237622d-config\") pod \"4d22fef9-d66b-44b7-b13b-d39bb237622d\" (UID: \"4d22fef9-d66b-44b7-b13b-d39bb237622d\") " Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.437050 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9m4hn\" (UniqueName: \"kubernetes.io/projected/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-kube-api-access-9m4hn\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.437060 4954 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.437069 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.437404 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d22fef9-d66b-44b7-b13b-d39bb237622d-config" (OuterVolumeSpecName: "config") pod "4d22fef9-d66b-44b7-b13b-d39bb237622d" (UID: "4d22fef9-d66b-44b7-b13b-d39bb237622d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.470443 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d22fef9-d66b-44b7-b13b-d39bb237622d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4d22fef9-d66b-44b7-b13b-d39bb237622d" (UID: "4d22fef9-d66b-44b7-b13b-d39bb237622d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.471769 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d22fef9-d66b-44b7-b13b-d39bb237622d-kube-api-access-tct8t" (OuterVolumeSpecName: "kube-api-access-tct8t") pod "4d22fef9-d66b-44b7-b13b-d39bb237622d" (UID: "4d22fef9-d66b-44b7-b13b-d39bb237622d"). InnerVolumeSpecName "kube-api-access-tct8t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.482287 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.488848 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.494906 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.495108 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.495209 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-2qd29" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.495313 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.528588 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.562059 4954 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d22fef9-d66b-44b7-b13b-d39bb237622d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.562085 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d22fef9-d66b-44b7-b13b-d39bb237622d-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.562095 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tct8t\" (UniqueName: \"kubernetes.io/projected/4d22fef9-d66b-44b7-b13b-d39bb237622d-kube-api-access-tct8t\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:25 crc kubenswrapper[4954]: W1128 16:31:25.600670 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5bedb6d_6351_4fdd_be82_103054905458.slice/crio-9adb0461b5db347006b7e68b873782474965e3bd2a496a6e9ca88c829f98476f WatchSource:0}: Error finding container 9adb0461b5db347006b7e68b873782474965e3bd2a496a6e9ca88c829f98476f: Status 404 returned error can't find the container with id 9adb0461b5db347006b7e68b873782474965e3bd2a496a6e9ca88c829f98476f Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.608457 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-5gr26"] Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.624594 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-55p57"] Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.664177 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.664712 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.664817 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/8bff6e67-d9f4-4952-992d-1fa362d23a5c-config\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.664921 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8bff6e67-d9f4-4952-992d-1fa362d23a5c-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.665145 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8bff6e67-d9f4-4952-992d-1fa362d23a5c-scripts\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.665160 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfwwp\" (UniqueName: \"kubernetes.io/projected/8bff6e67-d9f4-4952-992d-1fa362d23a5c-kube-api-access-bfwwp\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.665218 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.766272 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8bff6e67-d9f4-4952-992d-1fa362d23a5c-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.766427 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8bff6e67-d9f4-4952-992d-1fa362d23a5c-scripts\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.766456 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfwwp\" (UniqueName: \"kubernetes.io/projected/8bff6e67-d9f4-4952-992d-1fa362d23a5c-kube-api-access-bfwwp\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.766491 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.766516 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.766598 4954 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.766641 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bff6e67-d9f4-4952-992d-1fa362d23a5c-config\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.767056 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8bff6e67-d9f4-4952-992d-1fa362d23a5c-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.767422 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8bff6e67-d9f4-4952-992d-1fa362d23a5c-scripts\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.767869 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bff6e67-d9f4-4952-992d-1fa362d23a5c-config\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.771243 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.772416 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.775211 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.784460 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfwwp\" (UniqueName: \"kubernetes.io/projected/8bff6e67-d9f4-4952-992d-1fa362d23a5c-kube-api-access-bfwwp\") pod \"ovn-northd-0\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " pod="openstack/ovn-northd-0" Nov 28 16:31:25 crc kubenswrapper[4954]: I1128 16:31:25.885233 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 16:31:26 crc kubenswrapper[4954]: E1128 16:31:26.049630 4954 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d22fef9_d66b_44b7_b13b_d39bb237622d.slice/crio-4b3b96c5a743dd205ac1e7db27b348a6e4804f0e6f39103b9a7047ca23c89867\": RecentStats: unable to find data in memory cache]" Nov 28 16:31:26 crc kubenswrapper[4954]: I1128 16:31:26.056636 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-2cpmm"] Nov 28 16:31:26 crc kubenswrapper[4954]: I1128 16:31:26.117861 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-55p57" event={"ID":"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa","Type":"ContainerStarted","Data":"e58182aeb38cd0e17ba7dc4d14483d0f519a74821e8ce0ee1c137548d93895ba"} Nov 28 16:31:26 crc kubenswrapper[4954]: I1128 16:31:26.119156 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" event={"ID":"4d22fef9-d66b-44b7-b13b-d39bb237622d","Type":"ContainerDied","Data":"4b3b96c5a743dd205ac1e7db27b348a6e4804f0e6f39103b9a7047ca23c89867"} Nov 28 16:31:26 crc kubenswrapper[4954]: I1128 16:31:26.119303 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-7s4hp" Nov 28 16:31:26 crc kubenswrapper[4954]: I1128 16:31:26.121354 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-cfvbr" event={"ID":"f5a5a42d-2215-4bcb-a907-23a25bc6d5e2","Type":"ContainerDied","Data":"e84a42f7f07544191c8230b009ab73195f8e8289500a3db764e8ac994ca701bc"} Nov 28 16:31:26 crc kubenswrapper[4954]: I1128 16:31:26.121438 4954 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 16:31:26 crc kubenswrapper[4954]: I1128 16:31:26.123208 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-2cpmm" event={"ID":"0ba93bae-c03c-4d0b-b911-cb47ab7569bc","Type":"ContainerStarted","Data":"c9186b853e3715fa9839bf6c7b1b73d8db21cb9e3eac94e09b693a04afd43e41"}
Nov 28 16:31:26 crc kubenswrapper[4954]: I1128 16:31:26.126430 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" event={"ID":"f5bedb6d-6351-4fdd-be82-103054905458","Type":"ContainerStarted","Data":"9adb0461b5db347006b7e68b873782474965e3bd2a496a6e9ca88c829f98476f"}
Nov 28 16:31:26 crc kubenswrapper[4954]: I1128 16:31:26.129428 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"afe4302a-413f-48e1-90a9-3f1178e5c6f7","Type":"ContainerStarted","Data":"13ae9aa3934e30fe09e390d2eb8d342d20df09c16cb5c9d3cd643b0cba0bac5e"}
Nov 28 16:31:26 crc kubenswrapper[4954]: I1128 16:31:26.175202 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7s4hp"]
Nov 28 16:31:26 crc kubenswrapper[4954]: I1128 16:31:26.195146 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7s4hp"]
Nov 28 16:31:26 crc kubenswrapper[4954]: I1128 16:31:26.209228 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-cfvbr"]
Nov 28 16:31:26 crc kubenswrapper[4954]: I1128 16:31:26.222615 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-cfvbr"]
Nov 28 16:31:26 crc kubenswrapper[4954]: I1128 16:31:26.223330 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=4.161862187 podStartE2EDuration="48.223309582s" podCreationTimestamp="2025-11-28 16:30:38 +0000 UTC" firstStartedPulling="2025-11-28 16:30:40.281863736 +0000 UTC m=+1193.673532267" lastFinishedPulling="2025-11-28 16:31:24.343311121 +0000 UTC m=+1237.734979662" observedRunningTime="2025-11-28 16:31:26.215129816 +0000 UTC m=+1239.606798367" watchObservedRunningTime="2025-11-28 16:31:26.223309582 +0000 UTC m=+1239.614978123"
Nov 28 16:31:26 crc kubenswrapper[4954]: I1128 16:31:26.403822 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 28 16:31:27 crc kubenswrapper[4954]: I1128 16:31:27.138753 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8bff6e67-d9f4-4952-992d-1fa362d23a5c","Type":"ContainerStarted","Data":"6f923c39766002793dc5627e20b72140d6662eb95b5f71078335c85713439d31"}
Nov 28 16:31:27 crc kubenswrapper[4954]: I1128 16:31:27.867715 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d22fef9-d66b-44b7-b13b-d39bb237622d" path="/var/lib/kubelet/pods/4d22fef9-d66b-44b7-b13b-d39bb237622d/volumes"
Nov 28 16:31:27 crc kubenswrapper[4954]: I1128 16:31:27.868850 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5a5a42d-2215-4bcb-a907-23a25bc6d5e2" path="/var/lib/kubelet/pods/f5a5a42d-2215-4bcb-a907-23a25bc6d5e2/volumes"
Nov 28 16:31:28 crc kubenswrapper[4954]: I1128 16:31:28.148274 4954 generic.go:334] "Generic (PLEG): container finished" podID="f5bedb6d-6351-4fdd-be82-103054905458" containerID="941118e514a3837224c8de60ac49eea5434faeb4c76f325269d21277bea1ad6d" exitCode=0
Nov 28 16:31:28 crc kubenswrapper[4954]: I1128 16:31:28.148352 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" event={"ID":"f5bedb6d-6351-4fdd-be82-103054905458","Type":"ContainerDied","Data":"941118e514a3837224c8de60ac49eea5434faeb4c76f325269d21277bea1ad6d"}
"SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" event={"ID":"f5bedb6d-6351-4fdd-be82-103054905458","Type":"ContainerDied","Data":"941118e514a3837224c8de60ac49eea5434faeb4c76f325269d21277bea1ad6d"} Nov 28 16:31:28 crc kubenswrapper[4954]: I1128 16:31:28.153023 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-55p57" event={"ID":"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa","Type":"ContainerStarted","Data":"e156c21bec3f6d7422ff833d8e664020da9203178d2bf9a0025944bc8713896f"} Nov 28 16:31:28 crc kubenswrapper[4954]: I1128 16:31:28.154982 4954 generic.go:334] "Generic (PLEG): container finished" podID="0ba93bae-c03c-4d0b-b911-cb47ab7569bc" containerID="8d55ec26ef5770b4881fb8b3b21a9568984a3a7cbed0091fa292b7920a4ffeaa" exitCode=0 Nov 28 16:31:28 crc kubenswrapper[4954]: I1128 16:31:28.155029 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-2cpmm" event={"ID":"0ba93bae-c03c-4d0b-b911-cb47ab7569bc","Type":"ContainerDied","Data":"8d55ec26ef5770b4881fb8b3b21a9568984a3a7cbed0091fa292b7920a4ffeaa"} Nov 28 16:31:28 crc kubenswrapper[4954]: I1128 16:31:28.190160 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-55p57" podStartSLOduration=4.190107523 podStartE2EDuration="4.190107523s" podCreationTimestamp="2025-11-28 16:31:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:28.184318341 +0000 UTC m=+1241.575986882" watchObservedRunningTime="2025-11-28 16:31:28.190107523 +0000 UTC m=+1241.581776064" Nov 28 16:31:28 crc kubenswrapper[4954]: E1128 16:31:28.297715 4954 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.173:39656->38.102.83.173:35311: write tcp 38.102.83.173:39656->38.102.83.173:35311: write: broken pipe Nov 28 16:31:29 crc kubenswrapper[4954]: I1128 16:31:29.038634 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 28 16:31:29 crc kubenswrapper[4954]: I1128 16:31:29.039000 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 28 16:31:29 crc kubenswrapper[4954]: I1128 16:31:29.162670 4954 generic.go:334] "Generic (PLEG): container finished" podID="1d348de7-2e67-41df-8d59-4692491ea145" containerID="8e8430765fa780e78273b20891494975f648a250228f50c0bf9c943516e4e664" exitCode=0 Nov 28 16:31:29 crc kubenswrapper[4954]: I1128 16:31:29.162748 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1d348de7-2e67-41df-8d59-4692491ea145","Type":"ContainerDied","Data":"8e8430765fa780e78273b20891494975f648a250228f50c0bf9c943516e4e664"} Nov 28 16:31:29 crc kubenswrapper[4954]: I1128 16:31:29.166806 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" event={"ID":"f5bedb6d-6351-4fdd-be82-103054905458","Type":"ContainerStarted","Data":"0f4a5c84ce1297d8030c38ef621c18a19171bb8d7ec025fe99a5e7706fe16fb6"} Nov 28 16:31:29 crc kubenswrapper[4954]: I1128 16:31:29.166949 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" Nov 28 16:31:29 crc kubenswrapper[4954]: I1128 16:31:29.207567 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" podStartSLOduration=3.238149763 
Nov 28 16:31:29 crc kubenswrapper[4954]: I1128 16:31:29.346215 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0"
Nov 28 16:31:30 crc kubenswrapper[4954]: I1128 16:31:30.181433 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-2cpmm" event={"ID":"0ba93bae-c03c-4d0b-b911-cb47ab7569bc","Type":"ContainerStarted","Data":"5225d0166797b1bc0abbc31207d32c08986ecb5d0c0c14ce43cd077db5aa9fbd"}
Nov 28 16:31:30 crc kubenswrapper[4954]: I1128 16:31:30.181977 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-2cpmm"
Nov 28 16:31:30 crc kubenswrapper[4954]: I1128 16:31:30.189399 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8bff6e67-d9f4-4952-992d-1fa362d23a5c","Type":"ContainerStarted","Data":"a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b"}
Nov 28 16:31:30 crc kubenswrapper[4954]: I1128 16:31:30.205048 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-2cpmm" podStartSLOduration=4.530913372 podStartE2EDuration="6.205028582s" podCreationTimestamp="2025-11-28 16:31:24 +0000 UTC" firstStartedPulling="2025-11-28 16:31:26.067942223 +0000 UTC m=+1239.459610764" lastFinishedPulling="2025-11-28 16:31:27.742057433 +0000 UTC m=+1241.133725974" observedRunningTime="2025-11-28 16:31:30.201169171 +0000 UTC m=+1243.592837732" watchObservedRunningTime="2025-11-28 16:31:30.205028582 +0000 UTC m=+1243.596697123"
Nov 28 16:31:31 crc kubenswrapper[4954]: I1128 16:31:31.200098 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1d348de7-2e67-41df-8d59-4692491ea145","Type":"ContainerStarted","Data":"8532429bd8b897e8166cfc3585b91c29bc3335e178d6afa7ee092edcea1a6b07"}
Nov 28 16:31:31 crc kubenswrapper[4954]: I1128 16:31:31.202258 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8bff6e67-d9f4-4952-992d-1fa362d23a5c","Type":"ContainerStarted","Data":"022212ff501e5715e891c5ad172a0810fa883b76595a765f3609b5a7f03c28b6"}
Nov 28 16:31:31 crc kubenswrapper[4954]: I1128 16:31:31.202675 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Nov 28 16:31:31 crc kubenswrapper[4954]: I1128 16:31:31.204276 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"dc71f947-acc3-4867-8e70-0f2def81dc03","Type":"ContainerStarted","Data":"26eea4df2280948534275499d9644069915f6fc0ceb14efb26ac10a1655c3f0d"}
Nov 28 16:31:31 crc kubenswrapper[4954]: I1128 16:31:31.204491 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 28 16:31:31 crc kubenswrapper[4954]: I1128 16:31:31.226596 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=-9223371981.628267 podStartE2EDuration="55.226508561s" podCreationTimestamp="2025-11-28 16:30:36 +0000 UTC" firstStartedPulling="2025-11-28 16:30:38.533757548 +0000 UTC m=+1191.925426089" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:31.222576308 +0000 UTC m=+1244.614244889" watchObservedRunningTime="2025-11-28 16:31:31.226508561 +0000 UTC m=+1244.618177122"
Nov 28 16:31:31 crc kubenswrapper[4954]: I1128 16:31:31.234960 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Nov 28 16:31:31 crc kubenswrapper[4954]: I1128 16:31:31.266836 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.8338323450000003 podStartE2EDuration="6.266815384s" podCreationTimestamp="2025-11-28 16:31:25 +0000 UTC" firstStartedPulling="2025-11-28 16:31:26.407834215 +0000 UTC m=+1239.799502756" lastFinishedPulling="2025-11-28 16:31:28.840817254 +0000 UTC m=+1242.232485795" observedRunningTime="2025-11-28 16:31:31.265795191 +0000 UTC m=+1244.657463782" watchObservedRunningTime="2025-11-28 16:31:31.266815384 +0000 UTC m=+1244.658483935"
Nov 28 16:31:31 crc kubenswrapper[4954]: I1128 16:31:31.271818 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=7.716016707 podStartE2EDuration="50.27180119s" podCreationTimestamp="2025-11-28 16:30:41 +0000 UTC" firstStartedPulling="2025-11-28 16:30:47.385080791 +0000 UTC m=+1200.776749342" lastFinishedPulling="2025-11-28 16:31:29.940865284 +0000 UTC m=+1243.332533825" observedRunningTime="2025-11-28 16:31:31.241919724 +0000 UTC m=+1244.633588275" watchObservedRunningTime="2025-11-28 16:31:31.27180119 +0000 UTC m=+1244.663469731"
Nov 28 16:31:31 crc kubenswrapper[4954]: I1128 16:31:31.308598 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Nov 28 16:31:34 crc kubenswrapper[4954]: I1128 16:31:34.346336 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Nov 28 16:31:34 crc kubenswrapper[4954]: I1128 16:31:34.816765 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bc7876d45-5gr26"
Nov 28 16:31:35 crc kubenswrapper[4954]: I1128 16:31:35.326750 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-2cpmm"
Nov 28 16:31:35 crc kubenswrapper[4954]: I1128 16:31:35.393144 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-5gr26"]
Nov 28 16:31:35 crc kubenswrapper[4954]: I1128 16:31:35.393553 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" podUID="f5bedb6d-6351-4fdd-be82-103054905458" containerName="dnsmasq-dns" containerID="cri-o://0f4a5c84ce1297d8030c38ef621c18a19171bb8d7ec025fe99a5e7706fe16fb6" gracePeriod=10
Nov 28 16:31:35 crc kubenswrapper[4954]: I1128 16:31:35.902882 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-5gr26"
Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.069400 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-dns-svc\") pod \"f5bedb6d-6351-4fdd-be82-103054905458\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") "
Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.070092 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qpjrr\" (UniqueName: \"kubernetes.io/projected/f5bedb6d-6351-4fdd-be82-103054905458-kube-api-access-qpjrr\") pod \"f5bedb6d-6351-4fdd-be82-103054905458\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") "
Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.070611 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-ovsdbserver-sb\") pod \"f5bedb6d-6351-4fdd-be82-103054905458\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") "
Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.070938 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-config\") pod \"f5bedb6d-6351-4fdd-be82-103054905458\" (UID: \"f5bedb6d-6351-4fdd-be82-103054905458\") "
Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.075177 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5bedb6d-6351-4fdd-be82-103054905458-kube-api-access-qpjrr" (OuterVolumeSpecName: "kube-api-access-qpjrr") pod "f5bedb6d-6351-4fdd-be82-103054905458" (UID: "f5bedb6d-6351-4fdd-be82-103054905458"). InnerVolumeSpecName "kube-api-access-qpjrr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.116392 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f5bedb6d-6351-4fdd-be82-103054905458" (UID: "f5bedb6d-6351-4fdd-be82-103054905458"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.117070 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f5bedb6d-6351-4fdd-be82-103054905458" (UID: "f5bedb6d-6351-4fdd-be82-103054905458"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.117688 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-config" (OuterVolumeSpecName: "config") pod "f5bedb6d-6351-4fdd-be82-103054905458" (UID: "f5bedb6d-6351-4fdd-be82-103054905458"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.172331 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.172416 4954 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.172427 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qpjrr\" (UniqueName: \"kubernetes.io/projected/f5bedb6d-6351-4fdd-be82-103054905458-kube-api-access-qpjrr\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.172438 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f5bedb6d-6351-4fdd-be82-103054905458-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.244480 4954 generic.go:334] "Generic (PLEG): container finished" podID="f5bedb6d-6351-4fdd-be82-103054905458" containerID="0f4a5c84ce1297d8030c38ef621c18a19171bb8d7ec025fe99a5e7706fe16fb6" exitCode=0 Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.244542 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" event={"ID":"f5bedb6d-6351-4fdd-be82-103054905458","Type":"ContainerDied","Data":"0f4a5c84ce1297d8030c38ef621c18a19171bb8d7ec025fe99a5e7706fe16fb6"} Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.244570 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-5gr26" event={"ID":"f5bedb6d-6351-4fdd-be82-103054905458","Type":"ContainerDied","Data":"9adb0461b5db347006b7e68b873782474965e3bd2a496a6e9ca88c829f98476f"} Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.244588 4954 scope.go:117] "RemoveContainer" containerID="0f4a5c84ce1297d8030c38ef621c18a19171bb8d7ec025fe99a5e7706fe16fb6" Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.244702 4954 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.277441 4954 scope.go:117] "RemoveContainer" containerID="941118e514a3837224c8de60ac49eea5434faeb4c76f325269d21277bea1ad6d"
Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.301090 4954 scope.go:117] "RemoveContainer" containerID="0f4a5c84ce1297d8030c38ef621c18a19171bb8d7ec025fe99a5e7706fe16fb6"
Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.301540 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-5gr26"]
Nov 28 16:31:36 crc kubenswrapper[4954]: E1128 16:31:36.301575 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f4a5c84ce1297d8030c38ef621c18a19171bb8d7ec025fe99a5e7706fe16fb6\": container with ID starting with 0f4a5c84ce1297d8030c38ef621c18a19171bb8d7ec025fe99a5e7706fe16fb6 not found: ID does not exist" containerID="0f4a5c84ce1297d8030c38ef621c18a19171bb8d7ec025fe99a5e7706fe16fb6"
Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.301627 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f4a5c84ce1297d8030c38ef621c18a19171bb8d7ec025fe99a5e7706fe16fb6"} err="failed to get container status \"0f4a5c84ce1297d8030c38ef621c18a19171bb8d7ec025fe99a5e7706fe16fb6\": rpc error: code = NotFound desc = could not find container \"0f4a5c84ce1297d8030c38ef621c18a19171bb8d7ec025fe99a5e7706fe16fb6\": container with ID starting with 0f4a5c84ce1297d8030c38ef621c18a19171bb8d7ec025fe99a5e7706fe16fb6 not found: ID does not exist"
Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.301666 4954 scope.go:117] "RemoveContainer" containerID="941118e514a3837224c8de60ac49eea5434faeb4c76f325269d21277bea1ad6d"
Nov 28 16:31:36 crc kubenswrapper[4954]: E1128 16:31:36.301974 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"941118e514a3837224c8de60ac49eea5434faeb4c76f325269d21277bea1ad6d\": container with ID starting with 941118e514a3837224c8de60ac49eea5434faeb4c76f325269d21277bea1ad6d not found: ID does not exist" containerID="941118e514a3837224c8de60ac49eea5434faeb4c76f325269d21277bea1ad6d"
Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.302041 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"941118e514a3837224c8de60ac49eea5434faeb4c76f325269d21277bea1ad6d"} err="failed to get container status \"941118e514a3837224c8de60ac49eea5434faeb4c76f325269d21277bea1ad6d\": rpc error: code = NotFound desc = could not find container \"941118e514a3837224c8de60ac49eea5434faeb4c76f325269d21277bea1ad6d\": container with ID starting with 941118e514a3837224c8de60ac49eea5434faeb4c76f325269d21277bea1ad6d not found: ID does not exist"
Nov 28 16:31:36 crc kubenswrapper[4954]: I1128 16:31:36.307204 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-5gr26"]
Nov 28 16:31:37 crc kubenswrapper[4954]: I1128 16:31:37.692109 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Nov 28 16:31:37 crc kubenswrapper[4954]: I1128 16:31:37.692369 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Nov 28 16:31:37 crc kubenswrapper[4954]: I1128 16:31:37.800051 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
pod="openstack/openstack-galera-0" Nov 28 16:31:37 crc kubenswrapper[4954]: I1128 16:31:37.867873 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5bedb6d-6351-4fdd-be82-103054905458" path="/var/lib/kubelet/pods/f5bedb6d-6351-4fdd-be82-103054905458/volumes" Nov 28 16:31:38 crc kubenswrapper[4954]: I1128 16:31:38.359397 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 28 16:31:38 crc kubenswrapper[4954]: I1128 16:31:38.989929 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-bwl88"] Nov 28 16:31:38 crc kubenswrapper[4954]: E1128 16:31:38.990489 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5bedb6d-6351-4fdd-be82-103054905458" containerName="dnsmasq-dns" Nov 28 16:31:38 crc kubenswrapper[4954]: I1128 16:31:38.990515 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5bedb6d-6351-4fdd-be82-103054905458" containerName="dnsmasq-dns" Nov 28 16:31:38 crc kubenswrapper[4954]: E1128 16:31:38.990587 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5bedb6d-6351-4fdd-be82-103054905458" containerName="init" Nov 28 16:31:38 crc kubenswrapper[4954]: I1128 16:31:38.990602 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5bedb6d-6351-4fdd-be82-103054905458" containerName="init" Nov 28 16:31:38 crc kubenswrapper[4954]: I1128 16:31:38.990880 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5bedb6d-6351-4fdd-be82-103054905458" containerName="dnsmasq-dns" Nov 28 16:31:38 crc kubenswrapper[4954]: I1128 16:31:38.991804 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-bwl88" Nov 28 16:31:38 crc kubenswrapper[4954]: I1128 16:31:38.995766 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-80ff-account-create-update-b5gtr"] Nov 28 16:31:38 crc kubenswrapper[4954]: I1128 16:31:38.997040 4954 util.go:30] "No sandbox for pod can be found. 
Nov 28 16:31:38 crc kubenswrapper[4954]: I1128 16:31:38.998590 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.002443 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-80ff-account-create-update-b5gtr"]
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.007680 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-bwl88"]
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.129152 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlh2f\" (UniqueName: \"kubernetes.io/projected/8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2-kube-api-access-xlh2f\") pod \"keystone-80ff-account-create-update-b5gtr\" (UID: \"8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2\") " pod="openstack/keystone-80ff-account-create-update-b5gtr"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.129223 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2-operator-scripts\") pod \"keystone-80ff-account-create-update-b5gtr\" (UID: \"8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2\") " pod="openstack/keystone-80ff-account-create-update-b5gtr"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.129275 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7c69ec0b-b98f-4181-99bb-7b7f29c20fa9-operator-scripts\") pod \"keystone-db-create-bwl88\" (UID: \"7c69ec0b-b98f-4181-99bb-7b7f29c20fa9\") " pod="openstack/keystone-db-create-bwl88"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.129590 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5s7jq\" (UniqueName: \"kubernetes.io/projected/7c69ec0b-b98f-4181-99bb-7b7f29c20fa9-kube-api-access-5s7jq\") pod \"keystone-db-create-bwl88\" (UID: \"7c69ec0b-b98f-4181-99bb-7b7f29c20fa9\") " pod="openstack/keystone-db-create-bwl88"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.196949 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-djcvh"]
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.198114 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-djcvh"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.217432 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-djcvh"]
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.230854 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5s7jq\" (UniqueName: \"kubernetes.io/projected/7c69ec0b-b98f-4181-99bb-7b7f29c20fa9-kube-api-access-5s7jq\") pod \"keystone-db-create-bwl88\" (UID: \"7c69ec0b-b98f-4181-99bb-7b7f29c20fa9\") " pod="openstack/keystone-db-create-bwl88"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.230936 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlh2f\" (UniqueName: \"kubernetes.io/projected/8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2-kube-api-access-xlh2f\") pod \"keystone-80ff-account-create-update-b5gtr\" (UID: \"8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2\") " pod="openstack/keystone-80ff-account-create-update-b5gtr"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.230976 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2-operator-scripts\") pod \"keystone-80ff-account-create-update-b5gtr\" (UID: \"8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2\") " pod="openstack/keystone-80ff-account-create-update-b5gtr"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.231006 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7c69ec0b-b98f-4181-99bb-7b7f29c20fa9-operator-scripts\") pod \"keystone-db-create-bwl88\" (UID: \"7c69ec0b-b98f-4181-99bb-7b7f29c20fa9\") " pod="openstack/keystone-db-create-bwl88"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.233493 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7c69ec0b-b98f-4181-99bb-7b7f29c20fa9-operator-scripts\") pod \"keystone-db-create-bwl88\" (UID: \"7c69ec0b-b98f-4181-99bb-7b7f29c20fa9\") " pod="openstack/keystone-db-create-bwl88"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.233492 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2-operator-scripts\") pod \"keystone-80ff-account-create-update-b5gtr\" (UID: \"8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2\") " pod="openstack/keystone-80ff-account-create-update-b5gtr"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.248739 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5s7jq\" (UniqueName: \"kubernetes.io/projected/7c69ec0b-b98f-4181-99bb-7b7f29c20fa9-kube-api-access-5s7jq\") pod \"keystone-db-create-bwl88\" (UID: \"7c69ec0b-b98f-4181-99bb-7b7f29c20fa9\") " pod="openstack/keystone-db-create-bwl88"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.250704 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlh2f\" (UniqueName: \"kubernetes.io/projected/8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2-kube-api-access-xlh2f\") pod \"keystone-80ff-account-create-update-b5gtr\" (UID: \"8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2\") " pod="openstack/keystone-80ff-account-create-update-b5gtr"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.305837 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-4c07-account-create-update-ph8bg"]
source="api" pods=["openstack/placement-4c07-account-create-update-ph8bg"] Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.306928 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-4c07-account-create-update-ph8bg" Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.309672 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.315738 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-bwl88" Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.330822 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-4c07-account-create-update-ph8bg"] Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.332312 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7jgm\" (UniqueName: \"kubernetes.io/projected/8a6b461d-5be3-4805-9ba7-36cffead16e9-kube-api-access-b7jgm\") pod \"placement-db-create-djcvh\" (UID: \"8a6b461d-5be3-4805-9ba7-36cffead16e9\") " pod="openstack/placement-db-create-djcvh" Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.332357 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a6b461d-5be3-4805-9ba7-36cffead16e9-operator-scripts\") pod \"placement-db-create-djcvh\" (UID: \"8a6b461d-5be3-4805-9ba7-36cffead16e9\") " pod="openstack/placement-db-create-djcvh" Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.334642 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-80ff-account-create-update-b5gtr" Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.433849 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7jgm\" (UniqueName: \"kubernetes.io/projected/8a6b461d-5be3-4805-9ba7-36cffead16e9-kube-api-access-b7jgm\") pod \"placement-db-create-djcvh\" (UID: \"8a6b461d-5be3-4805-9ba7-36cffead16e9\") " pod="openstack/placement-db-create-djcvh" Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.434135 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a6b461d-5be3-4805-9ba7-36cffead16e9-operator-scripts\") pod \"placement-db-create-djcvh\" (UID: \"8a6b461d-5be3-4805-9ba7-36cffead16e9\") " pod="openstack/placement-db-create-djcvh" Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.434181 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nr9z\" (UniqueName: \"kubernetes.io/projected/8b16e07c-9b6f-429b-b239-3efa92727910-kube-api-access-7nr9z\") pod \"placement-4c07-account-create-update-ph8bg\" (UID: \"8b16e07c-9b6f-429b-b239-3efa92727910\") " pod="openstack/placement-4c07-account-create-update-ph8bg" Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.434247 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b16e07c-9b6f-429b-b239-3efa92727910-operator-scripts\") pod \"placement-4c07-account-create-update-ph8bg\" (UID: \"8b16e07c-9b6f-429b-b239-3efa92727910\") " pod="openstack/placement-4c07-account-create-update-ph8bg" Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.448844 
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.461089 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7jgm\" (UniqueName: \"kubernetes.io/projected/8a6b461d-5be3-4805-9ba7-36cffead16e9-kube-api-access-b7jgm\") pod \"placement-db-create-djcvh\" (UID: \"8a6b461d-5be3-4805-9ba7-36cffead16e9\") " pod="openstack/placement-db-create-djcvh"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.512045 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-djcvh"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.535600 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nr9z\" (UniqueName: \"kubernetes.io/projected/8b16e07c-9b6f-429b-b239-3efa92727910-kube-api-access-7nr9z\") pod \"placement-4c07-account-create-update-ph8bg\" (UID: \"8b16e07c-9b6f-429b-b239-3efa92727910\") " pod="openstack/placement-4c07-account-create-update-ph8bg"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.535678 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b16e07c-9b6f-429b-b239-3efa92727910-operator-scripts\") pod \"placement-4c07-account-create-update-ph8bg\" (UID: \"8b16e07c-9b6f-429b-b239-3efa92727910\") " pod="openstack/placement-4c07-account-create-update-ph8bg"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.536643 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b16e07c-9b6f-429b-b239-3efa92727910-operator-scripts\") pod \"placement-4c07-account-create-update-ph8bg\" (UID: \"8b16e07c-9b6f-429b-b239-3efa92727910\") " pod="openstack/placement-4c07-account-create-update-ph8bg"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.553500 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nr9z\" (UniqueName: \"kubernetes.io/projected/8b16e07c-9b6f-429b-b239-3efa92727910-kube-api-access-7nr9z\") pod \"placement-4c07-account-create-update-ph8bg\" (UID: \"8b16e07c-9b6f-429b-b239-3efa92727910\") " pod="openstack/placement-4c07-account-create-update-ph8bg"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.639270 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-4c07-account-create-update-ph8bg"
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.936261 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-bwl88"]
Nov 28 16:31:39 crc kubenswrapper[4954]: W1128 16:31:39.939087 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7c69ec0b_b98f_4181_99bb_7b7f29c20fa9.slice/crio-c627a6ebec39dbd4da5c0b26522113435ed3d6594e7e385a365464c0af60a3e7 WatchSource:0}: Error finding container c627a6ebec39dbd4da5c0b26522113435ed3d6594e7e385a365464c0af60a3e7: Status 404 returned error can't find the container with id c627a6ebec39dbd4da5c0b26522113435ed3d6594e7e385a365464c0af60a3e7
Nov 28 16:31:39 crc kubenswrapper[4954]: I1128 16:31:39.943699 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-80ff-account-create-update-b5gtr"]
Nov 28 16:31:40 crc kubenswrapper[4954]: I1128 16:31:40.018320 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-djcvh"]
Nov 28 16:31:40 crc kubenswrapper[4954]: W1128 16:31:40.026567 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a6b461d_5be3_4805_9ba7_36cffead16e9.slice/crio-95a45465662797c6f4909e3753a8773e3a2028c35a577c3f0c088fb64015dc78 WatchSource:0}: Error finding container 95a45465662797c6f4909e3753a8773e3a2028c35a577c3f0c088fb64015dc78: Status 404 returned error can't find the container with id 95a45465662797c6f4909e3753a8773e3a2028c35a577c3f0c088fb64015dc78
Nov 28 16:31:40 crc kubenswrapper[4954]: I1128 16:31:40.292560 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-80ff-account-create-update-b5gtr" event={"ID":"8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2","Type":"ContainerStarted","Data":"eb838218ca26754db98c82113b524723ced2179e83070c76d39753a42a63ff4c"}
Nov 28 16:31:40 crc kubenswrapper[4954]: I1128 16:31:40.293559 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-djcvh" event={"ID":"8a6b461d-5be3-4805-9ba7-36cffead16e9","Type":"ContainerStarted","Data":"95a45465662797c6f4909e3753a8773e3a2028c35a577c3f0c088fb64015dc78"}
Nov 28 16:31:40 crc kubenswrapper[4954]: I1128 16:31:40.294476 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-bwl88" event={"ID":"7c69ec0b-b98f-4181-99bb-7b7f29c20fa9","Type":"ContainerStarted","Data":"c627a6ebec39dbd4da5c0b26522113435ed3d6594e7e385a365464c0af60a3e7"}
Nov 28 16:31:40 crc kubenswrapper[4954]: I1128 16:31:40.331732 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-4c07-account-create-update-ph8bg"]
Nov 28 16:31:40 crc kubenswrapper[4954]: I1128 16:31:40.943894 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0"
Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.309911 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-80ff-account-create-update-b5gtr" event={"ID":"8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2","Type":"ContainerStarted","Data":"8e4a60a0c9ebe4259702f5bf21ad46c42f85291c4afce54389154b16e2fc0736"}
Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.314855 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-4c07-account-create-update-ph8bg" event={"ID":"8b16e07c-9b6f-429b-b239-3efa92727910","Type":"ContainerStarted","Data":"7d88064efa113b13270a380653c1ca5ca6b16375b3af64a054f3dfe5666ce593"}
event={"ID":"8b16e07c-9b6f-429b-b239-3efa92727910","Type":"ContainerStarted","Data":"7d88064efa113b13270a380653c1ca5ca6b16375b3af64a054f3dfe5666ce593"} Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.314898 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-4c07-account-create-update-ph8bg" event={"ID":"8b16e07c-9b6f-429b-b239-3efa92727910","Type":"ContainerStarted","Data":"fbbe7c50e39b48b3d9349c71b472c51b21fb4d92fa470c11854a90ef2fc3a27f"} Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.323679 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-djcvh" event={"ID":"8a6b461d-5be3-4805-9ba7-36cffead16e9","Type":"ContainerStarted","Data":"5fa26db3f297ab8d0dc997e2fdc6ffd39edf2b0f78a79277102da75c5bcef787"} Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.337684 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-bwl88" event={"ID":"7c69ec0b-b98f-4181-99bb-7b7f29c20fa9","Type":"ContainerStarted","Data":"0ed6f2d737022bca1d619301fdbfeda02b49d72ac4c9728e5a5e8d61013a751f"} Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.344395 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-80ff-account-create-update-b5gtr" podStartSLOduration=3.344373611 podStartE2EDuration="3.344373611s" podCreationTimestamp="2025-11-28 16:31:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:41.337214917 +0000 UTC m=+1254.728883458" watchObservedRunningTime="2025-11-28 16:31:41.344373611 +0000 UTC m=+1254.736042152" Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.374398 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-cjcq7"] Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.379239 4954 util.go:30] "No sandbox for pod can be found. 
Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.381161 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-djcvh" podStartSLOduration=2.381143793 podStartE2EDuration="2.381143793s" podCreationTimestamp="2025-11-28 16:31:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:41.375348981 +0000 UTC m=+1254.767017522" watchObservedRunningTime="2025-11-28 16:31:41.381143793 +0000 UTC m=+1254.772812334"
Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.386483 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.396453 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-cjcq7"]
Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.428376 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-4c07-account-create-update-ph8bg" podStartSLOduration=2.428352392 podStartE2EDuration="2.428352392s" podCreationTimestamp="2025-11-28 16:31:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:41.415752568 +0000 UTC m=+1254.807421109" watchObservedRunningTime="2025-11-28 16:31:41.428352392 +0000 UTC m=+1254.820020933"
Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.439149 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-bwl88" podStartSLOduration=3.439129271 podStartE2EDuration="3.439129271s" podCreationTimestamp="2025-11-28 16:31:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:41.434710082 +0000 UTC m=+1254.826378623" watchObservedRunningTime="2025-11-28 16:31:41.439129271 +0000 UTC m=+1254.830797812"
Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.483517 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7"
Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.483603 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-config\") pod \"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7"
Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.483628 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9j2pk\" (UniqueName: \"kubernetes.io/projected/d3e62442-5977-4a34-b774-d11724b64832-kube-api-access-9j2pk\") pod \"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7"
Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.483686 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7"
\"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.483737 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.589396 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.589477 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.589545 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-config\") pod \"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.589575 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9j2pk\" (UniqueName: \"kubernetes.io/projected/d3e62442-5977-4a34-b774-d11724b64832-kube-api-access-9j2pk\") pod \"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.589642 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.590428 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.590518 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.590636 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-dns-svc\") pod 
\"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.591166 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-config\") pod \"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.614494 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9j2pk\" (UniqueName: \"kubernetes.io/projected/d3e62442-5977-4a34-b774-d11724b64832-kube-api-access-9j2pk\") pod \"dnsmasq-dns-b8fbc5445-cjcq7\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:31:41 crc kubenswrapper[4954]: I1128 16:31:41.718589 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.156299 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-cjcq7"] Nov 28 16:31:42 crc kubenswrapper[4954]: W1128 16:31:42.166786 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3e62442_5977_4a34_b774_d11724b64832.slice/crio-da42f3af8f9d562458d0bbdf43ef602d6a3f4a3a750f534cc4baa2a95753f550 WatchSource:0}: Error finding container da42f3af8f9d562458d0bbdf43ef602d6a3f4a3a750f534cc4baa2a95753f550: Status 404 returned error can't find the container with id da42f3af8f9d562458d0bbdf43ef602d6a3f4a3a750f534cc4baa2a95753f550 Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.360428 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" event={"ID":"d3e62442-5977-4a34-b774-d11724b64832","Type":"ContainerStarted","Data":"da42f3af8f9d562458d0bbdf43ef602d6a3f4a3a750f534cc4baa2a95753f550"} Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.505613 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.511041 4954 util.go:30] "No sandbox for pod can be found. 
Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.514763 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data"
Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.514839 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf"
Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.514839 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-k4sm4"
Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.515951 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files"
Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.529862 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"]
Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.605274 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-cache\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0"
Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.605371 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-947jp\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-kube-api-access-947jp\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0"
Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.605412 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0"
Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.605440 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0"
Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.605517 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-lock\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0"
Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.707354 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-cache\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0"
Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.707491 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-947jp\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-kube-api-access-947jp\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0"
Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.707557 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0"
"operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0" Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.707585 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0" Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.707664 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-lock\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0" Nov 28 16:31:42 crc kubenswrapper[4954]: E1128 16:31:42.708079 4954 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 16:31:42 crc kubenswrapper[4954]: E1128 16:31:42.708114 4954 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.708157 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-lock\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0" Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.708164 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-cache\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0" Nov 28 16:31:42 crc kubenswrapper[4954]: E1128 16:31:42.708163 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift podName:6e076e8f-11b0-48ea-a26c-302df7a0ed2f nodeName:}" failed. No retries permitted until 2025-11-28 16:31:43.208141955 +0000 UTC m=+1256.599810496 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift") pod "swift-storage-0" (UID: "6e076e8f-11b0-48ea-a26c-302df7a0ed2f") : configmap "swift-ring-files" not found Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.708491 4954 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/swift-storage-0" Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.733561 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-947jp\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-kube-api-access-947jp\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0" Nov 28 16:31:42 crc kubenswrapper[4954]: I1128 16:31:42.737250 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0" Nov 28 16:31:43 crc kubenswrapper[4954]: I1128 16:31:43.216374 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0" Nov 28 16:31:43 crc kubenswrapper[4954]: E1128 16:31:43.216648 4954 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 16:31:43 crc kubenswrapper[4954]: E1128 16:31:43.217235 4954 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 16:31:43 crc kubenswrapper[4954]: E1128 16:31:43.217310 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift podName:6e076e8f-11b0-48ea-a26c-302df7a0ed2f nodeName:}" failed. No retries permitted until 2025-11-28 16:31:44.21728303 +0000 UTC m=+1257.608951601 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift") pod "swift-storage-0" (UID: "6e076e8f-11b0-48ea-a26c-302df7a0ed2f") : configmap "swift-ring-files" not found Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.235396 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0" Nov 28 16:31:44 crc kubenswrapper[4954]: E1128 16:31:44.235559 4954 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 16:31:44 crc kubenswrapper[4954]: E1128 16:31:44.235677 4954 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 16:31:44 crc kubenswrapper[4954]: E1128 16:31:44.235733 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift podName:6e076e8f-11b0-48ea-a26c-302df7a0ed2f nodeName:}" failed. No retries permitted until 2025-11-28 16:31:46.235714634 +0000 UTC m=+1259.627383175 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift") pod "swift-storage-0" (UID: "6e076e8f-11b0-48ea-a26c-302df7a0ed2f") : configmap "swift-ring-files" not found Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.378074 4954 generic.go:334] "Generic (PLEG): container finished" podID="7c69ec0b-b98f-4181-99bb-7b7f29c20fa9" containerID="0ed6f2d737022bca1d619301fdbfeda02b49d72ac4c9728e5a5e8d61013a751f" exitCode=0 Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.378151 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-bwl88" event={"ID":"7c69ec0b-b98f-4181-99bb-7b7f29c20fa9","Type":"ContainerDied","Data":"0ed6f2d737022bca1d619301fdbfeda02b49d72ac4c9728e5a5e8d61013a751f"} Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.380115 4954 generic.go:334] "Generic (PLEG): container finished" podID="d3e62442-5977-4a34-b774-d11724b64832" containerID="4482a020a36152e953c4e94288151282e5536e09ea3c9c692110129abdd1d7c7" exitCode=0 Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.380219 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" event={"ID":"d3e62442-5977-4a34-b774-d11724b64832","Type":"ContainerDied","Data":"4482a020a36152e953c4e94288151282e5536e09ea3c9c692110129abdd1d7c7"} Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.381884 4954 generic.go:334] "Generic (PLEG): container finished" podID="8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2" containerID="8e4a60a0c9ebe4259702f5bf21ad46c42f85291c4afce54389154b16e2fc0736" exitCode=0 Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.381988 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-80ff-account-create-update-b5gtr" event={"ID":"8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2","Type":"ContainerDied","Data":"8e4a60a0c9ebe4259702f5bf21ad46c42f85291c4afce54389154b16e2fc0736"} Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.383556 4954 generic.go:334] "Generic (PLEG): container finished" podID="8a6b461d-5be3-4805-9ba7-36cffead16e9" 
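
[Editor's note] The etc-swift failures above (and the 4s/8s retries further down) show kubelet's per-volume exponential backoff: each failed MountVolume.SetUp doubles durationBeforeRetry, giving the 500ms, 1s, 2s, 4s, 8s sequence visible in this log. A minimal Go sketch of that doubling policy, for illustration only; this is not kubelet's actual nestedpendingoperations code, and the 2m cap is an assumption:

package main

import (
	"fmt"
	"time"
)

// nextBackoff doubles the previous delay, starting at 500ms and capping
// at max. The printed sequence mirrors the durationBeforeRetry values
// logged above.
func nextBackoff(prev, max time.Duration) time.Duration {
	if prev == 0 {
		return 500 * time.Millisecond
	}
	if next := 2 * prev; next < max {
		return next
	}
	return max
}

func main() {
	d := time.Duration(0)
	for i := 0; i < 5; i++ {
		d = nextBackoff(d, 2*time.Minute) // cap value is hypothetical
		fmt.Println(d)                    // 500ms, 1s, 2s, 4s, 8s
	}
}
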
containerID="5fa26db3f297ab8d0dc997e2fdc6ffd39edf2b0f78a79277102da75c5bcef787" exitCode=0 Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.383613 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-djcvh" event={"ID":"8a6b461d-5be3-4805-9ba7-36cffead16e9","Type":"ContainerDied","Data":"5fa26db3f297ab8d0dc997e2fdc6ffd39edf2b0f78a79277102da75c5bcef787"} Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.385170 4954 generic.go:334] "Generic (PLEG): container finished" podID="8b16e07c-9b6f-429b-b239-3efa92727910" containerID="7d88064efa113b13270a380653c1ca5ca6b16375b3af64a054f3dfe5666ce593" exitCode=0 Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.385207 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-4c07-account-create-update-ph8bg" event={"ID":"8b16e07c-9b6f-429b-b239-3efa92727910","Type":"ContainerDied","Data":"7d88064efa113b13270a380653c1ca5ca6b16375b3af64a054f3dfe5666ce593"} Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.511669 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-fs8x2"] Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.513452 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-fs8x2" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.519661 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-fs8x2"] Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.619819 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-b261-account-create-update-z4z7m"] Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.620966 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-b261-account-create-update-z4z7m" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.625822 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.629250 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-b261-account-create-update-z4z7m"] Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.656567 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glnvh\" (UniqueName: \"kubernetes.io/projected/537c74c3-f8aa-4e8f-961b-87bf6a088574-kube-api-access-glnvh\") pod \"glance-db-create-fs8x2\" (UID: \"537c74c3-f8aa-4e8f-961b-87bf6a088574\") " pod="openstack/glance-db-create-fs8x2" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.656742 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/537c74c3-f8aa-4e8f-961b-87bf6a088574-operator-scripts\") pod \"glance-db-create-fs8x2\" (UID: \"537c74c3-f8aa-4e8f-961b-87bf6a088574\") " pod="openstack/glance-db-create-fs8x2" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.758905 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd-operator-scripts\") pod \"glance-b261-account-create-update-z4z7m\" (UID: \"bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd\") " pod="openstack/glance-b261-account-create-update-z4z7m" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.758969 4954 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/537c74c3-f8aa-4e8f-961b-87bf6a088574-operator-scripts\") pod \"glance-db-create-fs8x2\" (UID: \"537c74c3-f8aa-4e8f-961b-87bf6a088574\") " pod="openstack/glance-db-create-fs8x2" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.759015 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glnvh\" (UniqueName: \"kubernetes.io/projected/537c74c3-f8aa-4e8f-961b-87bf6a088574-kube-api-access-glnvh\") pod \"glance-db-create-fs8x2\" (UID: \"537c74c3-f8aa-4e8f-961b-87bf6a088574\") " pod="openstack/glance-db-create-fs8x2" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.759070 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrwd9\" (UniqueName: \"kubernetes.io/projected/bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd-kube-api-access-rrwd9\") pod \"glance-b261-account-create-update-z4z7m\" (UID: \"bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd\") " pod="openstack/glance-b261-account-create-update-z4z7m" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.759831 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/537c74c3-f8aa-4e8f-961b-87bf6a088574-operator-scripts\") pod \"glance-db-create-fs8x2\" (UID: \"537c74c3-f8aa-4e8f-961b-87bf6a088574\") " pod="openstack/glance-db-create-fs8x2" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.781343 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glnvh\" (UniqueName: \"kubernetes.io/projected/537c74c3-f8aa-4e8f-961b-87bf6a088574-kube-api-access-glnvh\") pod \"glance-db-create-fs8x2\" (UID: \"537c74c3-f8aa-4e8f-961b-87bf6a088574\") " pod="openstack/glance-db-create-fs8x2" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.860439 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd-operator-scripts\") pod \"glance-b261-account-create-update-z4z7m\" (UID: \"bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd\") " pod="openstack/glance-b261-account-create-update-z4z7m" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.860516 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrwd9\" (UniqueName: \"kubernetes.io/projected/bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd-kube-api-access-rrwd9\") pod \"glance-b261-account-create-update-z4z7m\" (UID: \"bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd\") " pod="openstack/glance-b261-account-create-update-z4z7m" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.861187 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd-operator-scripts\") pod \"glance-b261-account-create-update-z4z7m\" (UID: \"bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd\") " pod="openstack/glance-b261-account-create-update-z4z7m" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.882136 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrwd9\" (UniqueName: \"kubernetes.io/projected/bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd-kube-api-access-rrwd9\") pod \"glance-b261-account-create-update-z4z7m\" (UID: \"bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd\") " pod="openstack/glance-b261-account-create-update-z4z7m" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 
16:31:44.899850 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-fs8x2" Nov 28 16:31:44 crc kubenswrapper[4954]: I1128 16:31:44.940203 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-b261-account-create-update-z4z7m" Nov 28 16:31:45 crc kubenswrapper[4954]: I1128 16:31:45.357896 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-fs8x2"] Nov 28 16:31:45 crc kubenswrapper[4954]: W1128 16:31:45.362820 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod537c74c3_f8aa_4e8f_961b_87bf6a088574.slice/crio-ed40f487599f010bf870f0a29955bb3db6fbd630e7b4d01719ee25478b3f7ccb WatchSource:0}: Error finding container ed40f487599f010bf870f0a29955bb3db6fbd630e7b4d01719ee25478b3f7ccb: Status 404 returned error can't find the container with id ed40f487599f010bf870f0a29955bb3db6fbd630e7b4d01719ee25478b3f7ccb Nov 28 16:31:45 crc kubenswrapper[4954]: I1128 16:31:45.421123 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-fs8x2" event={"ID":"537c74c3-f8aa-4e8f-961b-87bf6a088574","Type":"ContainerStarted","Data":"ed40f487599f010bf870f0a29955bb3db6fbd630e7b4d01719ee25478b3f7ccb"} Nov 28 16:31:45 crc kubenswrapper[4954]: I1128 16:31:45.425156 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" event={"ID":"d3e62442-5977-4a34-b774-d11724b64832","Type":"ContainerStarted","Data":"584f51f065d5f968fd5955d046bd3591bdfd10e68862055befedb3dd8bcc7b00"} Nov 28 16:31:45 crc kubenswrapper[4954]: I1128 16:31:45.425481 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:31:45 crc kubenswrapper[4954]: I1128 16:31:45.480434 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" podStartSLOduration=4.480418907 podStartE2EDuration="4.480418907s" podCreationTimestamp="2025-11-28 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:45.478917431 +0000 UTC m=+1258.870585982" watchObservedRunningTime="2025-11-28 16:31:45.480418907 +0000 UTC m=+1258.872087448" Nov 28 16:31:45 crc kubenswrapper[4954]: I1128 16:31:45.511210 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-b261-account-create-update-z4z7m"] Nov 28 16:31:45 crc kubenswrapper[4954]: W1128 16:31:45.554206 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd2f088a_e1b5_4f44_8296_f6e8d36ce6cd.slice/crio-5ce439cea22c0bcc7652d703a966dbc81252e1f8d0feb45124d93e5f7e6e8889 WatchSource:0}: Error finding container 5ce439cea22c0bcc7652d703a966dbc81252e1f8d0feb45124d93e5f7e6e8889: Status 404 returned error can't find the container with id 5ce439cea22c0bcc7652d703a966dbc81252e1f8d0feb45124d93e5f7e6e8889 Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.084088 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-4c07-account-create-update-ph8bg" Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.192441 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b16e07c-9b6f-429b-b239-3efa92727910-operator-scripts\") pod \"8b16e07c-9b6f-429b-b239-3efa92727910\" (UID: \"8b16e07c-9b6f-429b-b239-3efa92727910\") " Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.192561 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nr9z\" (UniqueName: \"kubernetes.io/projected/8b16e07c-9b6f-429b-b239-3efa92727910-kube-api-access-7nr9z\") pod \"8b16e07c-9b6f-429b-b239-3efa92727910\" (UID: \"8b16e07c-9b6f-429b-b239-3efa92727910\") " Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.193220 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b16e07c-9b6f-429b-b239-3efa92727910-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8b16e07c-9b6f-429b-b239-3efa92727910" (UID: "8b16e07c-9b6f-429b-b239-3efa92727910"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.200207 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b16e07c-9b6f-429b-b239-3efa92727910-kube-api-access-7nr9z" (OuterVolumeSpecName: "kube-api-access-7nr9z") pod "8b16e07c-9b6f-429b-b239-3efa92727910" (UID: "8b16e07c-9b6f-429b-b239-3efa92727910"). InnerVolumeSpecName "kube-api-access-7nr9z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.234359 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-80ff-account-create-update-b5gtr" Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.238657 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-bwl88" Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.294732 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0" Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.294806 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b16e07c-9b6f-429b-b239-3efa92727910-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.294817 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nr9z\" (UniqueName: \"kubernetes.io/projected/8b16e07c-9b6f-429b-b239-3efa92727910-kube-api-access-7nr9z\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:46 crc kubenswrapper[4954]: E1128 16:31:46.294925 4954 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 16:31:46 crc kubenswrapper[4954]: E1128 16:31:46.294937 4954 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 16:31:46 crc kubenswrapper[4954]: E1128 16:31:46.294977 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift podName:6e076e8f-11b0-48ea-a26c-302df7a0ed2f nodeName:}" failed. No retries permitted until 2025-11-28 16:31:50.294962631 +0000 UTC m=+1263.686631172 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift") pod "swift-storage-0" (UID: "6e076e8f-11b0-48ea-a26c-302df7a0ed2f") : configmap "swift-ring-files" not found Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.327353 4954 util.go:48] "No ready sandbox for pod can be found. 
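
[Editor's note] Every etc-swift failure in this log traces back to a single missing object: the projected volume on swift-storage-0 lists the swift-ring-files ConfigMap as a source, so projected.go cannot prepare the volume until swift-ring-rebalance-24bxb (scheduled just below) publishes it. A sketch of what such a projected volume looks like using the k8s.io/api/core/v1 Go types; this is an assumed shape for illustration, since the actual swift-storage-0 pod spec is not part of this log:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// A projected volume whose source is the swift-ring-files ConfigMap.
	// kubelet fails MountVolume.SetUp -- exactly as logged above -- for as
	// long as the referenced ConfigMap does not exist in the namespace.
	vol := corev1.Volume{
		Name: "etc-swift",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{{
					ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{
							Name: "swift-ring-files",
						},
					},
				}},
			},
		},
	}
	fmt.Printf("%+v\n", vol)
}
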
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.327353 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-djcvh"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.399603 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7c69ec0b-b98f-4181-99bb-7b7f29c20fa9-operator-scripts\") pod \"7c69ec0b-b98f-4181-99bb-7b7f29c20fa9\" (UID: \"7c69ec0b-b98f-4181-99bb-7b7f29c20fa9\") "
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.400300 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xlh2f\" (UniqueName: \"kubernetes.io/projected/8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2-kube-api-access-xlh2f\") pod \"8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2\" (UID: \"8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2\") "
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.400362 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2-operator-scripts\") pod \"8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2\" (UID: \"8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2\") "
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.400838 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5s7jq\" (UniqueName: \"kubernetes.io/projected/7c69ec0b-b98f-4181-99bb-7b7f29c20fa9-kube-api-access-5s7jq\") pod \"7c69ec0b-b98f-4181-99bb-7b7f29c20fa9\" (UID: \"7c69ec0b-b98f-4181-99bb-7b7f29c20fa9\") "
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.402133 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2" (UID: "8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.402153 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c69ec0b-b98f-4181-99bb-7b7f29c20fa9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7c69ec0b-b98f-4181-99bb-7b7f29c20fa9" (UID: "7c69ec0b-b98f-4181-99bb-7b7f29c20fa9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.407989 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2-kube-api-access-xlh2f" (OuterVolumeSpecName: "kube-api-access-xlh2f") pod "8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2" (UID: "8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2"). InnerVolumeSpecName "kube-api-access-xlh2f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.408402 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c69ec0b-b98f-4181-99bb-7b7f29c20fa9-kube-api-access-5s7jq" (OuterVolumeSpecName: "kube-api-access-5s7jq") pod "7c69ec0b-b98f-4181-99bb-7b7f29c20fa9" (UID: "7c69ec0b-b98f-4181-99bb-7b7f29c20fa9"). InnerVolumeSpecName "kube-api-access-5s7jq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.431876 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-24bxb"]
Nov 28 16:31:46 crc kubenswrapper[4954]: E1128 16:31:46.432205 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b16e07c-9b6f-429b-b239-3efa92727910" containerName="mariadb-account-create-update"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.432223 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b16e07c-9b6f-429b-b239-3efa92727910" containerName="mariadb-account-create-update"
Nov 28 16:31:46 crc kubenswrapper[4954]: E1128 16:31:46.432246 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2" containerName="mariadb-account-create-update"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.432253 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2" containerName="mariadb-account-create-update"
Nov 28 16:31:46 crc kubenswrapper[4954]: E1128 16:31:46.432266 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c69ec0b-b98f-4181-99bb-7b7f29c20fa9" containerName="mariadb-database-create"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.432272 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c69ec0b-b98f-4181-99bb-7b7f29c20fa9" containerName="mariadb-database-create"
Nov 28 16:31:46 crc kubenswrapper[4954]: E1128 16:31:46.432283 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a6b461d-5be3-4805-9ba7-36cffead16e9" containerName="mariadb-database-create"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.432289 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a6b461d-5be3-4805-9ba7-36cffead16e9" containerName="mariadb-database-create"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.432437 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b16e07c-9b6f-429b-b239-3efa92727910" containerName="mariadb-account-create-update"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.432449 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2" containerName="mariadb-account-create-update"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.432465 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c69ec0b-b98f-4181-99bb-7b7f29c20fa9" containerName="mariadb-database-create"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.432480 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a6b461d-5be3-4805-9ba7-36cffead16e9" containerName="mariadb-database-create"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.433011 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.436285 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.436561 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.436832 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.444164 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-24bxb"]
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.451642 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-bwl88" event={"ID":"7c69ec0b-b98f-4181-99bb-7b7f29c20fa9","Type":"ContainerDied","Data":"c627a6ebec39dbd4da5c0b26522113435ed3d6594e7e385a365464c0af60a3e7"}
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.451682 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c627a6ebec39dbd4da5c0b26522113435ed3d6594e7e385a365464c0af60a3e7"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.451768 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-bwl88"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.453447 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-80ff-account-create-update-b5gtr" event={"ID":"8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2","Type":"ContainerDied","Data":"eb838218ca26754db98c82113b524723ced2179e83070c76d39753a42a63ff4c"}
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.453492 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb838218ca26754db98c82113b524723ced2179e83070c76d39753a42a63ff4c"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.453598 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-80ff-account-create-update-b5gtr"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.455130 4954 generic.go:334] "Generic (PLEG): container finished" podID="537c74c3-f8aa-4e8f-961b-87bf6a088574" containerID="b9a5897d5f8e65f42720138f7ef57bcd5702772168bdecf361e4be0f16626b95" exitCode=0
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.455209 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-fs8x2" event={"ID":"537c74c3-f8aa-4e8f-961b-87bf6a088574","Type":"ContainerDied","Data":"b9a5897d5f8e65f42720138f7ef57bcd5702772168bdecf361e4be0f16626b95"}
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.456834 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-djcvh" event={"ID":"8a6b461d-5be3-4805-9ba7-36cffead16e9","Type":"ContainerDied","Data":"95a45465662797c6f4909e3753a8773e3a2028c35a577c3f0c088fb64015dc78"}
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.456956 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95a45465662797c6f4909e3753a8773e3a2028c35a577c3f0c088fb64015dc78"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.457368 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-djcvh"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.458402 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-4c07-account-create-update-ph8bg" event={"ID":"8b16e07c-9b6f-429b-b239-3efa92727910","Type":"ContainerDied","Data":"fbbe7c50e39b48b3d9349c71b472c51b21fb4d92fa470c11854a90ef2fc3a27f"}
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.458427 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fbbe7c50e39b48b3d9349c71b472c51b21fb4d92fa470c11854a90ef2fc3a27f"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.458495 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-4c07-account-create-update-ph8bg"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.459694 4954 generic.go:334] "Generic (PLEG): container finished" podID="bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd" containerID="b9df2fdeddb1068c516718c64131c970593e818b9cd77028270f8bd7c828f26c" exitCode=0
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.460598 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-b261-account-create-update-z4z7m" event={"ID":"bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd","Type":"ContainerDied","Data":"b9df2fdeddb1068c516718c64131c970593e818b9cd77028270f8bd7c828f26c"}
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.460631 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-b261-account-create-update-z4z7m" event={"ID":"bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd","Type":"ContainerStarted","Data":"5ce439cea22c0bcc7652d703a966dbc81252e1f8d0feb45124d93e5f7e6e8889"}
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.501777 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7jgm\" (UniqueName: \"kubernetes.io/projected/8a6b461d-5be3-4805-9ba7-36cffead16e9-kube-api-access-b7jgm\") pod \"8a6b461d-5be3-4805-9ba7-36cffead16e9\" (UID: \"8a6b461d-5be3-4805-9ba7-36cffead16e9\") "
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.501900 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a6b461d-5be3-4805-9ba7-36cffead16e9-operator-scripts\") pod \"8a6b461d-5be3-4805-9ba7-36cffead16e9\" (UID: \"8a6b461d-5be3-4805-9ba7-36cffead16e9\") "
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.503299 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5qhq\" (UniqueName: \"kubernetes.io/projected/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-kube-api-access-c5qhq\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.503334 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-ring-data-devices\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.503429 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-etc-swift\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.503446 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-dispersionconf\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.503484 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-combined-ca-bundle\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.503657 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-swiftconf\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.503682 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-scripts\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.503750 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7c69ec0b-b98f-4181-99bb-7b7f29c20fa9-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.503766 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xlh2f\" (UniqueName: \"kubernetes.io/projected/8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2-kube-api-access-xlh2f\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.503778 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.504016 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5s7jq\" (UniqueName: \"kubernetes.io/projected/7c69ec0b-b98f-4181-99bb-7b7f29c20fa9-kube-api-access-5s7jq\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.504577 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a6b461d-5be3-4805-9ba7-36cffead16e9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8a6b461d-5be3-4805-9ba7-36cffead16e9" (UID: "8a6b461d-5be3-4805-9ba7-36cffead16e9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.507349 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a6b461d-5be3-4805-9ba7-36cffead16e9-kube-api-access-b7jgm" (OuterVolumeSpecName: "kube-api-access-b7jgm") pod "8a6b461d-5be3-4805-9ba7-36cffead16e9" (UID: "8a6b461d-5be3-4805-9ba7-36cffead16e9"). InnerVolumeSpecName "kube-api-access-b7jgm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.605806 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-etc-swift\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.605862 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-dispersionconf\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.605909 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-combined-ca-bundle\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.606003 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-swiftconf\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.606028 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-scripts\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.606074 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5qhq\" (UniqueName: \"kubernetes.io/projected/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-kube-api-access-c5qhq\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.606096 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-ring-data-devices\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.606147 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7jgm\" (UniqueName: \"kubernetes.io/projected/8a6b461d-5be3-4805-9ba7-36cffead16e9-kube-api-access-b7jgm\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.606162 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a6b461d-5be3-4805-9ba7-36cffead16e9-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.607246 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-ring-data-devices\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.607501 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-scripts\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.608557 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-etc-swift\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.609559 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-swiftconf\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.609602 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-dispersionconf\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.610007 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-combined-ca-bundle\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.622888 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5qhq\" (UniqueName: \"kubernetes.io/projected/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-kube-api-access-c5qhq\") pod \"swift-ring-rebalance-24bxb\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:46 crc kubenswrapper[4954]: I1128 16:31:46.861704 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-24bxb"
Nov 28 16:31:47 crc kubenswrapper[4954]: W1128 16:31:47.371200 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8ae2a66_ae7e_46a5_8485_ed7a0c3293b4.slice/crio-c891ba858da8a87f6977ecfe7f3888cf91cbcbe26b53105378a3ce0cf4898569 WatchSource:0}: Error finding container c891ba858da8a87f6977ecfe7f3888cf91cbcbe26b53105378a3ce0cf4898569: Status 404 returned error can't find the container with id c891ba858da8a87f6977ecfe7f3888cf91cbcbe26b53105378a3ce0cf4898569
Nov 28 16:31:47 crc kubenswrapper[4954]: I1128 16:31:47.372848 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-24bxb"]
Nov 28 16:31:47 crc kubenswrapper[4954]: I1128 16:31:47.467202 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-24bxb" event={"ID":"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4","Type":"ContainerStarted","Data":"c891ba858da8a87f6977ecfe7f3888cf91cbcbe26b53105378a3ce0cf4898569"}
Nov 28 16:31:47 crc kubenswrapper[4954]: I1128 16:31:47.469168 4954 generic.go:334] "Generic (PLEG): container finished" podID="ca81ef12-eb13-468e-81fc-0fdf6aba8830" containerID="edef8b1b7915e1f7d6f114baa465a89b8aaf40344d1baefb410030093e2aaa7b" exitCode=0
Nov 28 16:31:47 crc kubenswrapper[4954]: I1128 16:31:47.469276 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ca81ef12-eb13-468e-81fc-0fdf6aba8830","Type":"ContainerDied","Data":"edef8b1b7915e1f7d6f114baa465a89b8aaf40344d1baefb410030093e2aaa7b"}
Nov 28 16:31:47 crc kubenswrapper[4954]: I1128 16:31:47.891817 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-fs8x2"
Nov 28 16:31:47 crc kubenswrapper[4954]: I1128 16:31:47.903581 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-b261-account-create-update-z4z7m"
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.030449 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrwd9\" (UniqueName: \"kubernetes.io/projected/bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd-kube-api-access-rrwd9\") pod \"bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd\" (UID: \"bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd\") "
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.030505 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glnvh\" (UniqueName: \"kubernetes.io/projected/537c74c3-f8aa-4e8f-961b-87bf6a088574-kube-api-access-glnvh\") pod \"537c74c3-f8aa-4e8f-961b-87bf6a088574\" (UID: \"537c74c3-f8aa-4e8f-961b-87bf6a088574\") "
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.030669 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd-operator-scripts\") pod \"bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd\" (UID: \"bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd\") "
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.030693 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/537c74c3-f8aa-4e8f-961b-87bf6a088574-operator-scripts\") pod \"537c74c3-f8aa-4e8f-961b-87bf6a088574\" (UID: \"537c74c3-f8aa-4e8f-961b-87bf6a088574\") "
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.031628 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/537c74c3-f8aa-4e8f-961b-87bf6a088574-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "537c74c3-f8aa-4e8f-961b-87bf6a088574" (UID: "537c74c3-f8aa-4e8f-961b-87bf6a088574"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.031959 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd" (UID: "bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.035515 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd-kube-api-access-rrwd9" (OuterVolumeSpecName: "kube-api-access-rrwd9") pod "bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd" (UID: "bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd"). InnerVolumeSpecName "kube-api-access-rrwd9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.035633 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/537c74c3-f8aa-4e8f-961b-87bf6a088574-kube-api-access-glnvh" (OuterVolumeSpecName: "kube-api-access-glnvh") pod "537c74c3-f8aa-4e8f-961b-87bf6a088574" (UID: "537c74c3-f8aa-4e8f-961b-87bf6a088574"). InnerVolumeSpecName "kube-api-access-glnvh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.133102 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrwd9\" (UniqueName: \"kubernetes.io/projected/bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd-kube-api-access-rrwd9\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.133487 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glnvh\" (UniqueName: \"kubernetes.io/projected/537c74c3-f8aa-4e8f-961b-87bf6a088574-kube-api-access-glnvh\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.133500 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.133510 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/537c74c3-f8aa-4e8f-961b-87bf6a088574-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.485268 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ca81ef12-eb13-468e-81fc-0fdf6aba8830","Type":"ContainerStarted","Data":"dd5fdf435b0dbd1c11c86c7785b0da91c9296f8511aad91bbac2bb898f435f32"}
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.486557 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.486987 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-fs8x2" event={"ID":"537c74c3-f8aa-4e8f-961b-87bf6a088574","Type":"ContainerDied","Data":"ed40f487599f010bf870f0a29955bb3db6fbd630e7b4d01719ee25478b3f7ccb"}
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.487018 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ed40f487599f010bf870f0a29955bb3db6fbd630e7b4d01719ee25478b3f7ccb"
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.487100 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-fs8x2"
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.489819 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-b261-account-create-update-z4z7m" event={"ID":"bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd","Type":"ContainerDied","Data":"5ce439cea22c0bcc7652d703a966dbc81252e1f8d0feb45124d93e5f7e6e8889"}
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.489851 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ce439cea22c0bcc7652d703a966dbc81252e1f8d0feb45124d93e5f7e6e8889"
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.489861 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-b261-account-create-update-z4z7m"
Nov 28 16:31:48 crc kubenswrapper[4954]: I1128 16:31:48.533982 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.389921275 podStartE2EDuration="1m13.533959812s" podCreationTimestamp="2025-11-28 16:30:35 +0000 UTC" firstStartedPulling="2025-11-28 16:30:37.305626784 +0000 UTC m=+1190.697295325" lastFinishedPulling="2025-11-28 16:31:13.449665311 +0000 UTC m=+1226.841333862" observedRunningTime="2025-11-28 16:31:48.528429599 +0000 UTC m=+1261.920098150" watchObservedRunningTime="2025-11-28 16:31:48.533959812 +0000 UTC m=+1261.925628373"
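
[Editor's note] The two durations in the rabbitmq-cell1-server-0 record above can be reconstructed from its own fields: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration additionally subtracts the image-pull window (lastFinishedPulling minus firstStartedPulling, taken from the monotonic m=+... offsets). This decomposition is inferred from the numbers in the record itself, not from kubelet documentation; a sketch of the arithmetic in Go:

package main

import "fmt"

func main() {
	// Monotonic offsets (the m=+... values) from the record above.
	firstStartedPulling := 1190.697295325
	lastFinishedPulling := 1226.841333862
	// watchObservedRunningTime - podCreationTimestamp:
	// 16:31:48.533959812 - 16:30:35 = 73.533959812s ("1m13.533959812s").
	e2e := 73.533959812

	pulling := lastFinishedPulling - firstStartedPulling // 36.144038537s pulling images
	fmt.Printf("podStartSLOduration = %.9f\n", e2e-pulling)
	// prints 37.389921275, matching the logged podStartSLOduration
}
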
Nov 28 16:31:49 crc kubenswrapper[4954]: I1128 16:31:49.836255 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-6kcgd"]
Nov 28 16:31:49 crc kubenswrapper[4954]: E1128 16:31:49.836896 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd" containerName="mariadb-account-create-update"
Nov 28 16:31:49 crc kubenswrapper[4954]: I1128 16:31:49.836908 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd" containerName="mariadb-account-create-update"
Nov 28 16:31:49 crc kubenswrapper[4954]: E1128 16:31:49.836920 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="537c74c3-f8aa-4e8f-961b-87bf6a088574" containerName="mariadb-database-create"
Nov 28 16:31:49 crc kubenswrapper[4954]: I1128 16:31:49.836926 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="537c74c3-f8aa-4e8f-961b-87bf6a088574" containerName="mariadb-database-create"
Nov 28 16:31:49 crc kubenswrapper[4954]: I1128 16:31:49.837093 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="537c74c3-f8aa-4e8f-961b-87bf6a088574" containerName="mariadb-database-create"
Nov 28 16:31:49 crc kubenswrapper[4954]: I1128 16:31:49.837117 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd" containerName="mariadb-account-create-update"
Nov 28 16:31:49 crc kubenswrapper[4954]: I1128 16:31:49.837654 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-6kcgd"
Nov 28 16:31:49 crc kubenswrapper[4954]: I1128 16:31:49.843399 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-z8dvv"
Nov 28 16:31:49 crc kubenswrapper[4954]: I1128 16:31:49.843718 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data"
Nov 28 16:31:49 crc kubenswrapper[4954]: I1128 16:31:49.845031 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-6kcgd"]
Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.038972 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-db-sync-config-data\") pod \"glance-db-sync-6kcgd\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") " pod="openstack/glance-db-sync-6kcgd"
Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.039020 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-config-data\") pod \"glance-db-sync-6kcgd\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") " pod="openstack/glance-db-sync-6kcgd"
Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.039068 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-combined-ca-bundle\") pod \"glance-db-sync-6kcgd\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") " pod="openstack/glance-db-sync-6kcgd"
Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.039113 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvggz\" (UniqueName: \"kubernetes.io/projected/1e50d182-9126-4ef1-a377-8620e0357d4d-kube-api-access-kvggz\") pod \"glance-db-sync-6kcgd\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") " pod="openstack/glance-db-sync-6kcgd"
Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.140690 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-combined-ca-bundle\") pod \"glance-db-sync-6kcgd\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") " pod="openstack/glance-db-sync-6kcgd"
Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.140828 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvggz\" (UniqueName: \"kubernetes.io/projected/1e50d182-9126-4ef1-a377-8620e0357d4d-kube-api-access-kvggz\") pod \"glance-db-sync-6kcgd\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") " pod="openstack/glance-db-sync-6kcgd"
Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.141034 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-db-sync-config-data\") pod \"glance-db-sync-6kcgd\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") " pod="openstack/glance-db-sync-6kcgd"
Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.142008 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-config-data\") pod \"glance-db-sync-6kcgd\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") " pod="openstack/glance-db-sync-6kcgd"
Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.149125 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-db-sync-config-data\") pod \"glance-db-sync-6kcgd\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") " pod="openstack/glance-db-sync-6kcgd"
Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.150905 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-combined-ca-bundle\") pod \"glance-db-sync-6kcgd\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") " pod="openstack/glance-db-sync-6kcgd"
Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.151179 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-config-data\") pod \"glance-db-sync-6kcgd\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") " pod="openstack/glance-db-sync-6kcgd"
Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.163692 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-d4vfs"
Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.168122 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvggz\" (UniqueName: \"kubernetes.io/projected/1e50d182-9126-4ef1-a377-8620e0357d4d-kube-api-access-kvggz\") pod \"glance-db-sync-6kcgd\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") " pod="openstack/glance-db-sync-6kcgd"
Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.169081 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-vmnzt" podUID="e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" containerName="ovn-controller" probeResult="failure" output=<
Nov 28 16:31:50 crc kubenswrapper[4954]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 28 16:31:50 crc kubenswrapper[4954]: >
Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.177948 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-d4vfs"
Need to start a new one" pod="openstack/glance-db-sync-6kcgd" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.345035 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0" Nov 28 16:31:50 crc kubenswrapper[4954]: E1128 16:31:50.345171 4954 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 16:31:50 crc kubenswrapper[4954]: E1128 16:31:50.345269 4954 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 16:31:50 crc kubenswrapper[4954]: E1128 16:31:50.345325 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift podName:6e076e8f-11b0-48ea-a26c-302df7a0ed2f nodeName:}" failed. No retries permitted until 2025-11-28 16:31:58.345307422 +0000 UTC m=+1271.736975963 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift") pod "swift-storage-0" (UID: "6e076e8f-11b0-48ea-a26c-302df7a0ed2f") : configmap "swift-ring-files" not found Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.432313 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-vmnzt-config-mbp48"] Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.433374 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.435783 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.446823 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fca16299-6b70-4f89-920f-8c1fe550b8ab-scripts\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.446874 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-run-ovn\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.446941 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-run\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.446981 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-log-ovn\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " 
pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.447049 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fca16299-6b70-4f89-920f-8c1fe550b8ab-additional-scripts\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.447081 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64xg8\" (UniqueName: \"kubernetes.io/projected/fca16299-6b70-4f89-920f-8c1fe550b8ab-kube-api-access-64xg8\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.448208 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vmnzt-config-mbp48"] Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.548578 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-run\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.548729 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-log-ovn\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.548819 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fca16299-6b70-4f89-920f-8c1fe550b8ab-additional-scripts\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.548850 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64xg8\" (UniqueName: \"kubernetes.io/projected/fca16299-6b70-4f89-920f-8c1fe550b8ab-kube-api-access-64xg8\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.548937 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fca16299-6b70-4f89-920f-8c1fe550b8ab-scripts\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.548959 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-run-ovn\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.549063 4954 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-log-ovn\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.549069 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-run\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.549106 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-run-ovn\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.550268 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fca16299-6b70-4f89-920f-8c1fe550b8ab-additional-scripts\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.552116 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fca16299-6b70-4f89-920f-8c1fe550b8ab-scripts\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.578338 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64xg8\" (UniqueName: \"kubernetes.io/projected/fca16299-6b70-4f89-920f-8c1fe550b8ab-kube-api-access-64xg8\") pod \"ovn-controller-vmnzt-config-mbp48\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:50 crc kubenswrapper[4954]: I1128 16:31:50.761881 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:51 crc kubenswrapper[4954]: I1128 16:31:51.723291 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:31:51 crc kubenswrapper[4954]: I1128 16:31:51.788007 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-2cpmm"] Nov 28 16:31:51 crc kubenswrapper[4954]: I1128 16:31:51.788284 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-2cpmm" podUID="0ba93bae-c03c-4d0b-b911-cb47ab7569bc" containerName="dnsmasq-dns" containerID="cri-o://5225d0166797b1bc0abbc31207d32c08986ecb5d0c0c14ce43cd077db5aa9fbd" gracePeriod=10 Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.289361 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.330494 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vmnzt-config-mbp48"] Nov 28 16:31:52 crc kubenswrapper[4954]: W1128 16:31:52.343479 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfca16299_6b70_4f89_920f_8c1fe550b8ab.slice/crio-4292365866b59aee823b51aab7ebfcc54b5095046eba1d95bcd40a9d3fd1a169 WatchSource:0}: Error finding container 4292365866b59aee823b51aab7ebfcc54b5095046eba1d95bcd40a9d3fd1a169: Status 404 returned error can't find the container with id 4292365866b59aee823b51aab7ebfcc54b5095046eba1d95bcd40a9d3fd1a169 Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.384102 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-ovsdbserver-sb\") pod \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.384228 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-ovsdbserver-nb\") pod \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.384254 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hr5fz\" (UniqueName: \"kubernetes.io/projected/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-kube-api-access-hr5fz\") pod \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.384326 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-dns-svc\") pod \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.384380 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-config\") pod \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\" (UID: \"0ba93bae-c03c-4d0b-b911-cb47ab7569bc\") " Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.400065 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-kube-api-access-hr5fz" (OuterVolumeSpecName: "kube-api-access-hr5fz") pod "0ba93bae-c03c-4d0b-b911-cb47ab7569bc" (UID: "0ba93bae-c03c-4d0b-b911-cb47ab7569bc"). InnerVolumeSpecName "kube-api-access-hr5fz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.433676 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-6kcgd"] Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.447956 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0ba93bae-c03c-4d0b-b911-cb47ab7569bc" (UID: "0ba93bae-c03c-4d0b-b911-cb47ab7569bc"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.463661 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0ba93bae-c03c-4d0b-b911-cb47ab7569bc" (UID: "0ba93bae-c03c-4d0b-b911-cb47ab7569bc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.476762 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-config" (OuterVolumeSpecName: "config") pod "0ba93bae-c03c-4d0b-b911-cb47ab7569bc" (UID: "0ba93bae-c03c-4d0b-b911-cb47ab7569bc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.485509 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.485558 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hr5fz\" (UniqueName: \"kubernetes.io/projected/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-kube-api-access-hr5fz\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.485570 4954 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.485580 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.487433 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0ba93bae-c03c-4d0b-b911-cb47ab7569bc" (UID: "0ba93bae-c03c-4d0b-b911-cb47ab7569bc"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.527723 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-6kcgd" event={"ID":"1e50d182-9126-4ef1-a377-8620e0357d4d","Type":"ContainerStarted","Data":"4670b573a3dab28167da0e8c56eeaf569a64ddbef0b36cfe679a23326c7eb5bf"} Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.529028 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vmnzt-config-mbp48" event={"ID":"fca16299-6b70-4f89-920f-8c1fe550b8ab","Type":"ContainerStarted","Data":"4292365866b59aee823b51aab7ebfcc54b5095046eba1d95bcd40a9d3fd1a169"} Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.530765 4954 generic.go:334] "Generic (PLEG): container finished" podID="0ba93bae-c03c-4d0b-b911-cb47ab7569bc" containerID="5225d0166797b1bc0abbc31207d32c08986ecb5d0c0c14ce43cd077db5aa9fbd" exitCode=0 Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.530816 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-2cpmm" event={"ID":"0ba93bae-c03c-4d0b-b911-cb47ab7569bc","Type":"ContainerDied","Data":"5225d0166797b1bc0abbc31207d32c08986ecb5d0c0c14ce43cd077db5aa9fbd"} Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.530837 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-2cpmm" event={"ID":"0ba93bae-c03c-4d0b-b911-cb47ab7569bc","Type":"ContainerDied","Data":"c9186b853e3715fa9839bf6c7b1b73d8db21cb9e3eac94e09b693a04afd43e41"} Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.530855 4954 scope.go:117] "RemoveContainer" containerID="5225d0166797b1bc0abbc31207d32c08986ecb5d0c0c14ce43cd077db5aa9fbd" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.531013 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-2cpmm" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.539040 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-24bxb" event={"ID":"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4","Type":"ContainerStarted","Data":"64fd4fcf3a2617a8546b352438f8e3e0a582c3c419306e2e045c132b0817e838"} Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.570251 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-24bxb" podStartSLOduration=2.331799339 podStartE2EDuration="6.570218202s" podCreationTimestamp="2025-11-28 16:31:46 +0000 UTC" firstStartedPulling="2025-11-28 16:31:47.374323285 +0000 UTC m=+1260.765991826" lastFinishedPulling="2025-11-28 16:31:51.612742148 +0000 UTC m=+1265.004410689" observedRunningTime="2025-11-28 16:31:52.559250148 +0000 UTC m=+1265.950918709" watchObservedRunningTime="2025-11-28 16:31:52.570218202 +0000 UTC m=+1265.961886753" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.589938 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0ba93bae-c03c-4d0b-b911-cb47ab7569bc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.611378 4954 scope.go:117] "RemoveContainer" containerID="8d55ec26ef5770b4881fb8b3b21a9568984a3a7cbed0091fa292b7920a4ffeaa" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.630483 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-2cpmm"] Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.638561 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-2cpmm"] Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.656769 4954 scope.go:117] "RemoveContainer" containerID="5225d0166797b1bc0abbc31207d32c08986ecb5d0c0c14ce43cd077db5aa9fbd" Nov 28 16:31:52 crc kubenswrapper[4954]: E1128 16:31:52.657902 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5225d0166797b1bc0abbc31207d32c08986ecb5d0c0c14ce43cd077db5aa9fbd\": container with ID starting with 5225d0166797b1bc0abbc31207d32c08986ecb5d0c0c14ce43cd077db5aa9fbd not found: ID does not exist" containerID="5225d0166797b1bc0abbc31207d32c08986ecb5d0c0c14ce43cd077db5aa9fbd" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.658051 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5225d0166797b1bc0abbc31207d32c08986ecb5d0c0c14ce43cd077db5aa9fbd"} err="failed to get container status \"5225d0166797b1bc0abbc31207d32c08986ecb5d0c0c14ce43cd077db5aa9fbd\": rpc error: code = NotFound desc = could not find container \"5225d0166797b1bc0abbc31207d32c08986ecb5d0c0c14ce43cd077db5aa9fbd\": container with ID starting with 5225d0166797b1bc0abbc31207d32c08986ecb5d0c0c14ce43cd077db5aa9fbd not found: ID does not exist" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.658152 4954 scope.go:117] "RemoveContainer" containerID="8d55ec26ef5770b4881fb8b3b21a9568984a3a7cbed0091fa292b7920a4ffeaa" Nov 28 16:31:52 crc kubenswrapper[4954]: E1128 16:31:52.659863 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d55ec26ef5770b4881fb8b3b21a9568984a3a7cbed0091fa292b7920a4ffeaa\": container with ID starting with 8d55ec26ef5770b4881fb8b3b21a9568984a3a7cbed0091fa292b7920a4ffeaa 
not found: ID does not exist" containerID="8d55ec26ef5770b4881fb8b3b21a9568984a3a7cbed0091fa292b7920a4ffeaa" Nov 28 16:31:52 crc kubenswrapper[4954]: I1128 16:31:52.659980 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d55ec26ef5770b4881fb8b3b21a9568984a3a7cbed0091fa292b7920a4ffeaa"} err="failed to get container status \"8d55ec26ef5770b4881fb8b3b21a9568984a3a7cbed0091fa292b7920a4ffeaa\": rpc error: code = NotFound desc = could not find container \"8d55ec26ef5770b4881fb8b3b21a9568984a3a7cbed0091fa292b7920a4ffeaa\": container with ID starting with 8d55ec26ef5770b4881fb8b3b21a9568984a3a7cbed0091fa292b7920a4ffeaa not found: ID does not exist" Nov 28 16:31:53 crc kubenswrapper[4954]: I1128 16:31:53.867799 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ba93bae-c03c-4d0b-b911-cb47ab7569bc" path="/var/lib/kubelet/pods/0ba93bae-c03c-4d0b-b911-cb47ab7569bc/volumes" Nov 28 16:31:54 crc kubenswrapper[4954]: I1128 16:31:54.556719 4954 generic.go:334] "Generic (PLEG): container finished" podID="fca16299-6b70-4f89-920f-8c1fe550b8ab" containerID="a8372f58c68e6bbdd280dfc7db7108a88ab136fc4bc2e5c65170e347000c679f" exitCode=0 Nov 28 16:31:54 crc kubenswrapper[4954]: I1128 16:31:54.556838 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vmnzt-config-mbp48" event={"ID":"fca16299-6b70-4f89-920f-8c1fe550b8ab","Type":"ContainerDied","Data":"a8372f58c68e6bbdd280dfc7db7108a88ab136fc4bc2e5c65170e347000c679f"} Nov 28 16:31:55 crc kubenswrapper[4954]: I1128 16:31:55.130834 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-vmnzt" Nov 28 16:31:55 crc kubenswrapper[4954]: I1128 16:31:55.583012 4954 generic.go:334] "Generic (PLEG): container finished" podID="8a252e1a-c96a-4f98-b24e-b224fedf344c" containerID="350d2239e212031027828edd8c6bccb2046da3c9a507517a93cf98dffefd49cd" exitCode=0 Nov 28 16:31:55 crc kubenswrapper[4954]: I1128 16:31:55.583176 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8a252e1a-c96a-4f98-b24e-b224fedf344c","Type":"ContainerDied","Data":"350d2239e212031027828edd8c6bccb2046da3c9a507517a93cf98dffefd49cd"} Nov 28 16:31:55 crc kubenswrapper[4954]: I1128 16:31:55.970512 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.150716 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fca16299-6b70-4f89-920f-8c1fe550b8ab-additional-scripts\") pod \"fca16299-6b70-4f89-920f-8c1fe550b8ab\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.150767 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fca16299-6b70-4f89-920f-8c1fe550b8ab-scripts\") pod \"fca16299-6b70-4f89-920f-8c1fe550b8ab\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.150858 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-run\") pod \"fca16299-6b70-4f89-920f-8c1fe550b8ab\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.150882 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-log-ovn\") pod \"fca16299-6b70-4f89-920f-8c1fe550b8ab\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.150910 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64xg8\" (UniqueName: \"kubernetes.io/projected/fca16299-6b70-4f89-920f-8c1fe550b8ab-kube-api-access-64xg8\") pod \"fca16299-6b70-4f89-920f-8c1fe550b8ab\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.150936 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-run-ovn\") pod \"fca16299-6b70-4f89-920f-8c1fe550b8ab\" (UID: \"fca16299-6b70-4f89-920f-8c1fe550b8ab\") " Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.151333 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "fca16299-6b70-4f89-920f-8c1fe550b8ab" (UID: "fca16299-6b70-4f89-920f-8c1fe550b8ab"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.152006 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fca16299-6b70-4f89-920f-8c1fe550b8ab-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "fca16299-6b70-4f89-920f-8c1fe550b8ab" (UID: "fca16299-6b70-4f89-920f-8c1fe550b8ab"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.152681 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fca16299-6b70-4f89-920f-8c1fe550b8ab-scripts" (OuterVolumeSpecName: "scripts") pod "fca16299-6b70-4f89-920f-8c1fe550b8ab" (UID: "fca16299-6b70-4f89-920f-8c1fe550b8ab"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.152708 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-run" (OuterVolumeSpecName: "var-run") pod "fca16299-6b70-4f89-920f-8c1fe550b8ab" (UID: "fca16299-6b70-4f89-920f-8c1fe550b8ab"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.152724 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "fca16299-6b70-4f89-920f-8c1fe550b8ab" (UID: "fca16299-6b70-4f89-920f-8c1fe550b8ab"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.164813 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fca16299-6b70-4f89-920f-8c1fe550b8ab-kube-api-access-64xg8" (OuterVolumeSpecName: "kube-api-access-64xg8") pod "fca16299-6b70-4f89-920f-8c1fe550b8ab" (UID: "fca16299-6b70-4f89-920f-8c1fe550b8ab"). InnerVolumeSpecName "kube-api-access-64xg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.252962 4954 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.252995 4954 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.253006 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64xg8\" (UniqueName: \"kubernetes.io/projected/fca16299-6b70-4f89-920f-8c1fe550b8ab-kube-api-access-64xg8\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.253015 4954 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fca16299-6b70-4f89-920f-8c1fe550b8ab-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.253025 4954 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/fca16299-6b70-4f89-920f-8c1fe550b8ab-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.253821 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fca16299-6b70-4f89-920f-8c1fe550b8ab-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.596219 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8a252e1a-c96a-4f98-b24e-b224fedf344c","Type":"ContainerStarted","Data":"bd527e7188610d7a63072bd328e5dd74a2b0ad964e3b6ec643b1c9a69f9b0c1d"} Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.597518 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.598900 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-vmnzt-config-mbp48" event={"ID":"fca16299-6b70-4f89-920f-8c1fe550b8ab","Type":"ContainerDied","Data":"4292365866b59aee823b51aab7ebfcc54b5095046eba1d95bcd40a9d3fd1a169"} Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.598925 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4292365866b59aee823b51aab7ebfcc54b5095046eba1d95bcd40a9d3fd1a169" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.598961 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vmnzt-config-mbp48" Nov 28 16:31:56 crc kubenswrapper[4954]: I1128 16:31:56.633016 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=-9223371954.221785 podStartE2EDuration="1m22.632992161s" podCreationTimestamp="2025-11-28 16:30:34 +0000 UTC" firstStartedPulling="2025-11-28 16:30:36.938722737 +0000 UTC m=+1190.330391278" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:31:56.628463109 +0000 UTC m=+1270.020131670" watchObservedRunningTime="2025-11-28 16:31:56.632992161 +0000 UTC m=+1270.024660702" Nov 28 16:31:56 crc kubenswrapper[4954]: E1128 16:31:56.832676 4954 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfca16299_6b70_4f89_920f_8c1fe550b8ab.slice\": RecentStats: unable to find data in memory cache]" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.078277 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-vmnzt-config-mbp48"] Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.088959 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-vmnzt-config-mbp48"] Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.200610 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-vmnzt-config-6nbg7"] Nov 28 16:31:57 crc kubenswrapper[4954]: E1128 16:31:57.200979 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fca16299-6b70-4f89-920f-8c1fe550b8ab" containerName="ovn-config" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.200998 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="fca16299-6b70-4f89-920f-8c1fe550b8ab" containerName="ovn-config" Nov 28 16:31:57 crc kubenswrapper[4954]: E1128 16:31:57.201022 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ba93bae-c03c-4d0b-b911-cb47ab7569bc" containerName="init" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.201033 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ba93bae-c03c-4d0b-b911-cb47ab7569bc" containerName="init" Nov 28 16:31:57 crc kubenswrapper[4954]: E1128 16:31:57.201063 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ba93bae-c03c-4d0b-b911-cb47ab7569bc" containerName="dnsmasq-dns" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.201082 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ba93bae-c03c-4d0b-b911-cb47ab7569bc" containerName="dnsmasq-dns" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.201270 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ba93bae-c03c-4d0b-b911-cb47ab7569bc" containerName="dnsmasq-dns" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.201288 4954 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="fca16299-6b70-4f89-920f-8c1fe550b8ab" containerName="ovn-config" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.201932 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.208407 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.221856 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vmnzt-config-6nbg7"] Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.381591 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-run\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.381887 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/01e80ddd-6532-4f89-9523-025e2cb55fb5-additional-scripts\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.381947 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-log-ovn\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.382059 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/01e80ddd-6532-4f89-9523-025e2cb55fb5-scripts\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.382088 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-run-ovn\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.382175 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfzx9\" (UniqueName: \"kubernetes.io/projected/01e80ddd-6532-4f89-9523-025e2cb55fb5-kube-api-access-rfzx9\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.484641 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/01e80ddd-6532-4f89-9523-025e2cb55fb5-additional-scripts\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 
16:31:57.485601 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/01e80ddd-6532-4f89-9523-025e2cb55fb5-additional-scripts\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.485674 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-log-ovn\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.485847 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/01e80ddd-6532-4f89-9523-025e2cb55fb5-scripts\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.485873 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-run-ovn\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.486160 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-log-ovn\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.486250 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-run-ovn\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.488127 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/01e80ddd-6532-4f89-9523-025e2cb55fb5-scripts\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.488654 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfzx9\" (UniqueName: \"kubernetes.io/projected/01e80ddd-6532-4f89-9523-025e2cb55fb5-kube-api-access-rfzx9\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.488706 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-run\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.488875 4954 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-run\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.517311 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfzx9\" (UniqueName: \"kubernetes.io/projected/01e80ddd-6532-4f89-9523-025e2cb55fb5-kube-api-access-rfzx9\") pod \"ovn-controller-vmnzt-config-6nbg7\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.816902 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:31:57 crc kubenswrapper[4954]: I1128 16:31:57.877042 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fca16299-6b70-4f89-920f-8c1fe550b8ab" path="/var/lib/kubelet/pods/fca16299-6b70-4f89-920f-8c1fe550b8ab/volumes" Nov 28 16:31:58 crc kubenswrapper[4954]: I1128 16:31:58.411994 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-vmnzt-config-6nbg7"] Nov 28 16:31:58 crc kubenswrapper[4954]: I1128 16:31:58.417499 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0" Nov 28 16:31:58 crc kubenswrapper[4954]: E1128 16:31:58.417730 4954 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 16:31:58 crc kubenswrapper[4954]: E1128 16:31:58.417753 4954 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 16:31:58 crc kubenswrapper[4954]: E1128 16:31:58.417821 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift podName:6e076e8f-11b0-48ea-a26c-302df7a0ed2f nodeName:}" failed. No retries permitted until 2025-11-28 16:32:14.417787309 +0000 UTC m=+1287.809455860 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift") pod "swift-storage-0" (UID: "6e076e8f-11b0-48ea-a26c-302df7a0ed2f") : configmap "swift-ring-files" not found Nov 28 16:31:58 crc kubenswrapper[4954]: I1128 16:31:58.620681 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vmnzt-config-6nbg7" event={"ID":"01e80ddd-6532-4f89-9523-025e2cb55fb5","Type":"ContainerStarted","Data":"f64645ec90ba1ee134c1a7b866b959d5a573834b1e31e720c165c7b9d697d0a6"} Nov 28 16:31:59 crc kubenswrapper[4954]: I1128 16:31:59.630430 4954 generic.go:334] "Generic (PLEG): container finished" podID="01e80ddd-6532-4f89-9523-025e2cb55fb5" containerID="7d6fe30a2f91cd279d988851b8d6ef64b6b98f0bd3cfe2f1f5f96da4fdaecd36" exitCode=0 Nov 28 16:31:59 crc kubenswrapper[4954]: I1128 16:31:59.630484 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vmnzt-config-6nbg7" event={"ID":"01e80ddd-6532-4f89-9523-025e2cb55fb5","Type":"ContainerDied","Data":"7d6fe30a2f91cd279d988851b8d6ef64b6b98f0bd3cfe2f1f5f96da4fdaecd36"} Nov 28 16:32:00 crc kubenswrapper[4954]: I1128 16:32:00.639479 4954 generic.go:334] "Generic (PLEG): container finished" podID="f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4" containerID="64fd4fcf3a2617a8546b352438f8e3e0a582c3c419306e2e045c132b0817e838" exitCode=0 Nov 28 16:32:00 crc kubenswrapper[4954]: I1128 16:32:00.639676 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-24bxb" event={"ID":"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4","Type":"ContainerDied","Data":"64fd4fcf3a2617a8546b352438f8e3e0a582c3c419306e2e045c132b0817e838"} Nov 28 16:32:06 crc kubenswrapper[4954]: I1128 16:32:06.173660 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="8a252e1a-c96a-4f98-b24e-b224fedf344c" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused" Nov 28 16:32:06 crc kubenswrapper[4954]: I1128 16:32:06.521758 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:32:09 crc kubenswrapper[4954]: E1128 16:32:09.809029 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Nov 28 16:32:09 crc kubenswrapper[4954]: E1128 16:32:09.809771 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kvggz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-6kcgd_openstack(1e50d182-9126-4ef1-a377-8620e0357d4d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:32:09 crc kubenswrapper[4954]: E1128 16:32:09.811232 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-6kcgd" podUID="1e50d182-9126-4ef1-a377-8620e0357d4d" Nov 28 16:32:09 crc kubenswrapper[4954]: I1128 16:32:09.843726 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:32:09 crc kubenswrapper[4954]: I1128 16:32:09.848721 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-24bxb" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.009495 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rfzx9\" (UniqueName: \"kubernetes.io/projected/01e80ddd-6532-4f89-9523-025e2cb55fb5-kube-api-access-rfzx9\") pod \"01e80ddd-6532-4f89-9523-025e2cb55fb5\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.009555 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-log-ovn\") pod \"01e80ddd-6532-4f89-9523-025e2cb55fb5\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.009579 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-scripts\") pod \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.009610 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-dispersionconf\") pod \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.009927 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-run-ovn\") pod \"01e80ddd-6532-4f89-9523-025e2cb55fb5\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.009970 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-swiftconf\") pod \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.010008 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-run\") pod \"01e80ddd-6532-4f89-9523-025e2cb55fb5\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.010037 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-etc-swift\") pod \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.009929 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "01e80ddd-6532-4f89-9523-025e2cb55fb5" (UID: "01e80ddd-6532-4f89-9523-025e2cb55fb5"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.010033 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "01e80ddd-6532-4f89-9523-025e2cb55fb5" (UID: "01e80ddd-6532-4f89-9523-025e2cb55fb5"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.010104 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-run" (OuterVolumeSpecName: "var-run") pod "01e80ddd-6532-4f89-9523-025e2cb55fb5" (UID: "01e80ddd-6532-4f89-9523-025e2cb55fb5"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.010160 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-ring-data-devices\") pod \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.010437 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/01e80ddd-6532-4f89-9523-025e2cb55fb5-additional-scripts\") pod \"01e80ddd-6532-4f89-9523-025e2cb55fb5\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.010543 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-combined-ca-bundle\") pod \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.010636 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4" (UID: "f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.010962 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01e80ddd-6532-4f89-9523-025e2cb55fb5-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "01e80ddd-6532-4f89-9523-025e2cb55fb5" (UID: "01e80ddd-6532-4f89-9523-025e2cb55fb5"). InnerVolumeSpecName "additional-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.011106 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c5qhq\" (UniqueName: \"kubernetes.io/projected/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-kube-api-access-c5qhq\") pod \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\" (UID: \"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4\") " Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.011195 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/01e80ddd-6532-4f89-9523-025e2cb55fb5-scripts\") pod \"01e80ddd-6532-4f89-9523-025e2cb55fb5\" (UID: \"01e80ddd-6532-4f89-9523-025e2cb55fb5\") " Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.011133 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4" (UID: "f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.012097 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01e80ddd-6532-4f89-9523-025e2cb55fb5-scripts" (OuterVolumeSpecName: "scripts") pod "01e80ddd-6532-4f89-9523-025e2cb55fb5" (UID: "01e80ddd-6532-4f89-9523-025e2cb55fb5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.012172 4954 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.012435 4954 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/01e80ddd-6532-4f89-9523-025e2cb55fb5-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.012464 4954 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.012474 4954 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.012483 4954 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/01e80ddd-6532-4f89-9523-025e2cb55fb5-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.012493 4954 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.015788 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01e80ddd-6532-4f89-9523-025e2cb55fb5-kube-api-access-rfzx9" (OuterVolumeSpecName: "kube-api-access-rfzx9") pod "01e80ddd-6532-4f89-9523-025e2cb55fb5" (UID: 
"01e80ddd-6532-4f89-9523-025e2cb55fb5"). InnerVolumeSpecName "kube-api-access-rfzx9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.027973 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-kube-api-access-c5qhq" (OuterVolumeSpecName: "kube-api-access-c5qhq") pod "f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4" (UID: "f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4"). InnerVolumeSpecName "kube-api-access-c5qhq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.029140 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4" (UID: "f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.035684 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-scripts" (OuterVolumeSpecName: "scripts") pod "f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4" (UID: "f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.038062 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4" (UID: "f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.038636 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4" (UID: "f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.114402 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.114440 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c5qhq\" (UniqueName: \"kubernetes.io/projected/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-kube-api-access-c5qhq\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.114451 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/01e80ddd-6532-4f89-9523-025e2cb55fb5-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.114460 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rfzx9\" (UniqueName: \"kubernetes.io/projected/01e80ddd-6532-4f89-9523-025e2cb55fb5-kube-api-access-rfzx9\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.114469 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.114477 4954 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.114486 4954 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.767852 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vmnzt-config-6nbg7" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.767863 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vmnzt-config-6nbg7" event={"ID":"01e80ddd-6532-4f89-9523-025e2cb55fb5","Type":"ContainerDied","Data":"f64645ec90ba1ee134c1a7b866b959d5a573834b1e31e720c165c7b9d697d0a6"} Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.768223 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f64645ec90ba1ee134c1a7b866b959d5a573834b1e31e720c165c7b9d697d0a6" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.769710 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-24bxb" event={"ID":"f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4","Type":"ContainerDied","Data":"c891ba858da8a87f6977ecfe7f3888cf91cbcbe26b53105378a3ce0cf4898569"} Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.769753 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-24bxb" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.770001 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c891ba858da8a87f6977ecfe7f3888cf91cbcbe26b53105378a3ce0cf4898569" Nov 28 16:32:10 crc kubenswrapper[4954]: E1128 16:32:10.771443 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-6kcgd" podUID="1e50d182-9126-4ef1-a377-8620e0357d4d" Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.931976 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-vmnzt-config-6nbg7"] Nov 28 16:32:10 crc kubenswrapper[4954]: I1128 16:32:10.943231 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-vmnzt-config-6nbg7"] Nov 28 16:32:11 crc kubenswrapper[4954]: I1128 16:32:11.869763 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01e80ddd-6532-4f89-9523-025e2cb55fb5" path="/var/lib/kubelet/pods/01e80ddd-6532-4f89-9523-025e2cb55fb5/volumes" Nov 28 16:32:14 crc kubenswrapper[4954]: I1128 16:32:14.488789 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0" Nov 28 16:32:14 crc kubenswrapper[4954]: I1128 16:32:14.495776 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift\") pod \"swift-storage-0\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " pod="openstack/swift-storage-0" Nov 28 16:32:14 crc kubenswrapper[4954]: I1128 16:32:14.635346 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 28 16:32:15 crc kubenswrapper[4954]: I1128 16:32:15.155628 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 28 16:32:15 crc kubenswrapper[4954]: W1128 16:32:15.166338 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6e076e8f_11b0_48ea_a26c_302df7a0ed2f.slice/crio-816602151bb9f389d6417db9f5912a2b060b0665b424223e190bcfe2fb630c24 WatchSource:0}: Error finding container 816602151bb9f389d6417db9f5912a2b060b0665b424223e190bcfe2fb630c24: Status 404 returned error can't find the container with id 816602151bb9f389d6417db9f5912a2b060b0665b424223e190bcfe2fb630c24 Nov 28 16:32:15 crc kubenswrapper[4954]: I1128 16:32:15.815330 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"816602151bb9f389d6417db9f5912a2b060b0665b424223e190bcfe2fb630c24"} Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.168809 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.522083 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-dkjtq"] Nov 28 16:32:16 crc kubenswrapper[4954]: E1128 16:32:16.522591 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01e80ddd-6532-4f89-9523-025e2cb55fb5" containerName="ovn-config" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.522606 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="01e80ddd-6532-4f89-9523-025e2cb55fb5" containerName="ovn-config" Nov 28 16:32:16 crc kubenswrapper[4954]: E1128 16:32:16.522657 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4" containerName="swift-ring-rebalance" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.522663 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4" containerName="swift-ring-rebalance" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.522861 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="01e80ddd-6532-4f89-9523-025e2cb55fb5" containerName="ovn-config" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.522876 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4" containerName="swift-ring-rebalance" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.523411 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-dkjtq" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.537978 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-dkjtq"] Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.637802 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/030c776d-b20a-4852-a19b-ccf67b8589b5-operator-scripts\") pod \"cinder-db-create-dkjtq\" (UID: \"030c776d-b20a-4852-a19b-ccf67b8589b5\") " pod="openstack/cinder-db-create-dkjtq" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.637868 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ll725\" (UniqueName: \"kubernetes.io/projected/030c776d-b20a-4852-a19b-ccf67b8589b5-kube-api-access-ll725\") pod \"cinder-db-create-dkjtq\" (UID: \"030c776d-b20a-4852-a19b-ccf67b8589b5\") " pod="openstack/cinder-db-create-dkjtq" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.639736 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-jt755"] Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.640747 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-jt755" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.658575 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-jt755"] Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.740194 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2636eac0-4a90-4144-9172-16b5bcc80ca4-operator-scripts\") pod \"barbican-db-create-jt755\" (UID: \"2636eac0-4a90-4144-9172-16b5bcc80ca4\") " pod="openstack/barbican-db-create-jt755" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.740503 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/030c776d-b20a-4852-a19b-ccf67b8589b5-operator-scripts\") pod \"cinder-db-create-dkjtq\" (UID: \"030c776d-b20a-4852-a19b-ccf67b8589b5\") " pod="openstack/cinder-db-create-dkjtq" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.740546 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ll725\" (UniqueName: \"kubernetes.io/projected/030c776d-b20a-4852-a19b-ccf67b8589b5-kube-api-access-ll725\") pod \"cinder-db-create-dkjtq\" (UID: \"030c776d-b20a-4852-a19b-ccf67b8589b5\") " pod="openstack/cinder-db-create-dkjtq" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.740570 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84mfr\" (UniqueName: \"kubernetes.io/projected/2636eac0-4a90-4144-9172-16b5bcc80ca4-kube-api-access-84mfr\") pod \"barbican-db-create-jt755\" (UID: \"2636eac0-4a90-4144-9172-16b5bcc80ca4\") " pod="openstack/barbican-db-create-jt755" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.741218 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/030c776d-b20a-4852-a19b-ccf67b8589b5-operator-scripts\") pod \"cinder-db-create-dkjtq\" (UID: \"030c776d-b20a-4852-a19b-ccf67b8589b5\") " pod="openstack/cinder-db-create-dkjtq" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.743122 
4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-3a2d-account-create-update-tdzcg"] Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.744318 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-3a2d-account-create-update-tdzcg" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.746017 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.757375 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-3a2d-account-create-update-tdzcg"] Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.771320 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ll725\" (UniqueName: \"kubernetes.io/projected/030c776d-b20a-4852-a19b-ccf67b8589b5-kube-api-access-ll725\") pod \"cinder-db-create-dkjtq\" (UID: \"030c776d-b20a-4852-a19b-ccf67b8589b5\") " pod="openstack/cinder-db-create-dkjtq" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.843596 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31e642ba-e4e1-42e7-aa70-6fa045bbc379-operator-scripts\") pod \"barbican-3a2d-account-create-update-tdzcg\" (UID: \"31e642ba-e4e1-42e7-aa70-6fa045bbc379\") " pod="openstack/barbican-3a2d-account-create-update-tdzcg" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.843756 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnzml\" (UniqueName: \"kubernetes.io/projected/31e642ba-e4e1-42e7-aa70-6fa045bbc379-kube-api-access-lnzml\") pod \"barbican-3a2d-account-create-update-tdzcg\" (UID: \"31e642ba-e4e1-42e7-aa70-6fa045bbc379\") " pod="openstack/barbican-3a2d-account-create-update-tdzcg" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.843865 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84mfr\" (UniqueName: \"kubernetes.io/projected/2636eac0-4a90-4144-9172-16b5bcc80ca4-kube-api-access-84mfr\") pod \"barbican-db-create-jt755\" (UID: \"2636eac0-4a90-4144-9172-16b5bcc80ca4\") " pod="openstack/barbican-db-create-jt755" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.843914 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2636eac0-4a90-4144-9172-16b5bcc80ca4-operator-scripts\") pod \"barbican-db-create-jt755\" (UID: \"2636eac0-4a90-4144-9172-16b5bcc80ca4\") " pod="openstack/barbican-db-create-jt755" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.844725 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2636eac0-4a90-4144-9172-16b5bcc80ca4-operator-scripts\") pod \"barbican-db-create-jt755\" (UID: \"2636eac0-4a90-4144-9172-16b5bcc80ca4\") " pod="openstack/barbican-db-create-jt755" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.845368 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"a3de1c50e569442c4d137eb6c4f01fa275602e3c8780f037bca31766cae6424c"} Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.845452 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"41ade0bb5410fb26da6929cbab355527e23f47061fb8a9b3b265c2dba9186585"} Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.850392 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-dkjtq" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.859455 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-d6b0-account-create-update-z5zkz"] Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.860851 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d6b0-account-create-update-z5zkz" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.866020 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-d6b0-account-create-update-z5zkz"] Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.866826 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.886070 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84mfr\" (UniqueName: \"kubernetes.io/projected/2636eac0-4a90-4144-9172-16b5bcc80ca4-kube-api-access-84mfr\") pod \"barbican-db-create-jt755\" (UID: \"2636eac0-4a90-4144-9172-16b5bcc80ca4\") " pod="openstack/barbican-db-create-jt755" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.929800 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-ntrfz"] Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.931141 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-ntrfz" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.935206 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.944307 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-tvqk5" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.944509 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.945503 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2mhs\" (UniqueName: \"kubernetes.io/projected/e880dbc3-1219-4582-b093-96e007d91831-kube-api-access-q2mhs\") pod \"cinder-d6b0-account-create-update-z5zkz\" (UID: \"e880dbc3-1219-4582-b093-96e007d91831\") " pod="openstack/cinder-d6b0-account-create-update-z5zkz" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.945584 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31e642ba-e4e1-42e7-aa70-6fa045bbc379-operator-scripts\") pod \"barbican-3a2d-account-create-update-tdzcg\" (UID: \"31e642ba-e4e1-42e7-aa70-6fa045bbc379\") " pod="openstack/barbican-3a2d-account-create-update-tdzcg" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.945675 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnzml\" (UniqueName: \"kubernetes.io/projected/31e642ba-e4e1-42e7-aa70-6fa045bbc379-kube-api-access-lnzml\") pod \"barbican-3a2d-account-create-update-tdzcg\" (UID: \"31e642ba-e4e1-42e7-aa70-6fa045bbc379\") " pod="openstack/barbican-3a2d-account-create-update-tdzcg" Nov 
28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.945816 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e880dbc3-1219-4582-b093-96e007d91831-operator-scripts\") pod \"cinder-d6b0-account-create-update-z5zkz\" (UID: \"e880dbc3-1219-4582-b093-96e007d91831\") " pod="openstack/cinder-d6b0-account-create-update-z5zkz" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.947833 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31e642ba-e4e1-42e7-aa70-6fa045bbc379-operator-scripts\") pod \"barbican-3a2d-account-create-update-tdzcg\" (UID: \"31e642ba-e4e1-42e7-aa70-6fa045bbc379\") " pod="openstack/barbican-3a2d-account-create-update-tdzcg" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.948295 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.948548 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-ntrfz"] Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.963000 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-s75gb"] Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.969826 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-s75gb" Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.983131 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-s75gb"] Nov 28 16:32:16 crc kubenswrapper[4954]: I1128 16:32:16.984393 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnzml\" (UniqueName: \"kubernetes.io/projected/31e642ba-e4e1-42e7-aa70-6fa045bbc379-kube-api-access-lnzml\") pod \"barbican-3a2d-account-create-update-tdzcg\" (UID: \"31e642ba-e4e1-42e7-aa70-6fa045bbc379\") " pod="openstack/barbican-3a2d-account-create-update-tdzcg" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.010727 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-jt755" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.028025 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-9335-account-create-update-r8gqv"] Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.033953 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-9335-account-create-update-r8gqv" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.042161 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.047945 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e880dbc3-1219-4582-b093-96e007d91831-operator-scripts\") pod \"cinder-d6b0-account-create-update-z5zkz\" (UID: \"e880dbc3-1219-4582-b093-96e007d91831\") " pod="openstack/cinder-d6b0-account-create-update-z5zkz" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.048004 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/032f188e-b87a-49fc-838f-f024006df4df-config-data\") pod \"keystone-db-sync-ntrfz\" (UID: \"032f188e-b87a-49fc-838f-f024006df4df\") " pod="openstack/keystone-db-sync-ntrfz" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.048077 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c76dr\" (UniqueName: \"kubernetes.io/projected/032f188e-b87a-49fc-838f-f024006df4df-kube-api-access-c76dr\") pod \"keystone-db-sync-ntrfz\" (UID: \"032f188e-b87a-49fc-838f-f024006df4df\") " pod="openstack/keystone-db-sync-ntrfz" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.048137 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/032f188e-b87a-49fc-838f-f024006df4df-combined-ca-bundle\") pod \"keystone-db-sync-ntrfz\" (UID: \"032f188e-b87a-49fc-838f-f024006df4df\") " pod="openstack/keystone-db-sync-ntrfz" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.048180 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2mhs\" (UniqueName: \"kubernetes.io/projected/e880dbc3-1219-4582-b093-96e007d91831-kube-api-access-q2mhs\") pod \"cinder-d6b0-account-create-update-z5zkz\" (UID: \"e880dbc3-1219-4582-b093-96e007d91831\") " pod="openstack/cinder-d6b0-account-create-update-z5zkz" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.049506 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e880dbc3-1219-4582-b093-96e007d91831-operator-scripts\") pod \"cinder-d6b0-account-create-update-z5zkz\" (UID: \"e880dbc3-1219-4582-b093-96e007d91831\") " pod="openstack/cinder-d6b0-account-create-update-z5zkz" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.074419 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-9335-account-create-update-r8gqv"] Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.078184 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2mhs\" (UniqueName: \"kubernetes.io/projected/e880dbc3-1219-4582-b093-96e007d91831-kube-api-access-q2mhs\") pod \"cinder-d6b0-account-create-update-z5zkz\" (UID: \"e880dbc3-1219-4582-b093-96e007d91831\") " pod="openstack/cinder-d6b0-account-create-update-z5zkz" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.144822 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-3a2d-account-create-update-tdzcg" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.152807 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/032f188e-b87a-49fc-838f-f024006df4df-config-data\") pod \"keystone-db-sync-ntrfz\" (UID: \"032f188e-b87a-49fc-838f-f024006df4df\") " pod="openstack/keystone-db-sync-ntrfz" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.152864 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c76dr\" (UniqueName: \"kubernetes.io/projected/032f188e-b87a-49fc-838f-f024006df4df-kube-api-access-c76dr\") pod \"keystone-db-sync-ntrfz\" (UID: \"032f188e-b87a-49fc-838f-f024006df4df\") " pod="openstack/keystone-db-sync-ntrfz" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.152927 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/988f8c45-0585-47e8-b9aa-c02c53c62c75-operator-scripts\") pod \"neutron-9335-account-create-update-r8gqv\" (UID: \"988f8c45-0585-47e8-b9aa-c02c53c62c75\") " pod="openstack/neutron-9335-account-create-update-r8gqv" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.152957 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/032f188e-b87a-49fc-838f-f024006df4df-combined-ca-bundle\") pod \"keystone-db-sync-ntrfz\" (UID: \"032f188e-b87a-49fc-838f-f024006df4df\") " pod="openstack/keystone-db-sync-ntrfz" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.152982 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb410d6d-f34f-4433-bab6-5a42c73408ab-operator-scripts\") pod \"neutron-db-create-s75gb\" (UID: \"bb410d6d-f34f-4433-bab6-5a42c73408ab\") " pod="openstack/neutron-db-create-s75gb" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.153012 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9r6p\" (UniqueName: \"kubernetes.io/projected/988f8c45-0585-47e8-b9aa-c02c53c62c75-kube-api-access-s9r6p\") pod \"neutron-9335-account-create-update-r8gqv\" (UID: \"988f8c45-0585-47e8-b9aa-c02c53c62c75\") " pod="openstack/neutron-9335-account-create-update-r8gqv" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.153039 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wr7l2\" (UniqueName: \"kubernetes.io/projected/bb410d6d-f34f-4433-bab6-5a42c73408ab-kube-api-access-wr7l2\") pod \"neutron-db-create-s75gb\" (UID: \"bb410d6d-f34f-4433-bab6-5a42c73408ab\") " pod="openstack/neutron-db-create-s75gb" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.159760 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/032f188e-b87a-49fc-838f-f024006df4df-config-data\") pod \"keystone-db-sync-ntrfz\" (UID: \"032f188e-b87a-49fc-838f-f024006df4df\") " pod="openstack/keystone-db-sync-ntrfz" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.160273 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/032f188e-b87a-49fc-838f-f024006df4df-combined-ca-bundle\") pod 
\"keystone-db-sync-ntrfz\" (UID: \"032f188e-b87a-49fc-838f-f024006df4df\") " pod="openstack/keystone-db-sync-ntrfz" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.174635 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c76dr\" (UniqueName: \"kubernetes.io/projected/032f188e-b87a-49fc-838f-f024006df4df-kube-api-access-c76dr\") pod \"keystone-db-sync-ntrfz\" (UID: \"032f188e-b87a-49fc-838f-f024006df4df\") " pod="openstack/keystone-db-sync-ntrfz" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.224117 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d6b0-account-create-update-z5zkz" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.254456 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb410d6d-f34f-4433-bab6-5a42c73408ab-operator-scripts\") pod \"neutron-db-create-s75gb\" (UID: \"bb410d6d-f34f-4433-bab6-5a42c73408ab\") " pod="openstack/neutron-db-create-s75gb" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.254804 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9r6p\" (UniqueName: \"kubernetes.io/projected/988f8c45-0585-47e8-b9aa-c02c53c62c75-kube-api-access-s9r6p\") pod \"neutron-9335-account-create-update-r8gqv\" (UID: \"988f8c45-0585-47e8-b9aa-c02c53c62c75\") " pod="openstack/neutron-9335-account-create-update-r8gqv" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.254829 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wr7l2\" (UniqueName: \"kubernetes.io/projected/bb410d6d-f34f-4433-bab6-5a42c73408ab-kube-api-access-wr7l2\") pod \"neutron-db-create-s75gb\" (UID: \"bb410d6d-f34f-4433-bab6-5a42c73408ab\") " pod="openstack/neutron-db-create-s75gb" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.254948 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/988f8c45-0585-47e8-b9aa-c02c53c62c75-operator-scripts\") pod \"neutron-9335-account-create-update-r8gqv\" (UID: \"988f8c45-0585-47e8-b9aa-c02c53c62c75\") " pod="openstack/neutron-9335-account-create-update-r8gqv" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.255355 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb410d6d-f34f-4433-bab6-5a42c73408ab-operator-scripts\") pod \"neutron-db-create-s75gb\" (UID: \"bb410d6d-f34f-4433-bab6-5a42c73408ab\") " pod="openstack/neutron-db-create-s75gb" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.255653 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/988f8c45-0585-47e8-b9aa-c02c53c62c75-operator-scripts\") pod \"neutron-9335-account-create-update-r8gqv\" (UID: \"988f8c45-0585-47e8-b9aa-c02c53c62c75\") " pod="openstack/neutron-9335-account-create-update-r8gqv" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.257222 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-ntrfz" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.423952 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wr7l2\" (UniqueName: \"kubernetes.io/projected/bb410d6d-f34f-4433-bab6-5a42c73408ab-kube-api-access-wr7l2\") pod \"neutron-db-create-s75gb\" (UID: \"bb410d6d-f34f-4433-bab6-5a42c73408ab\") " pod="openstack/neutron-db-create-s75gb" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.427430 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9r6p\" (UniqueName: \"kubernetes.io/projected/988f8c45-0585-47e8-b9aa-c02c53c62c75-kube-api-access-s9r6p\") pod \"neutron-9335-account-create-update-r8gqv\" (UID: \"988f8c45-0585-47e8-b9aa-c02c53c62c75\") " pod="openstack/neutron-9335-account-create-update-r8gqv" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.500031 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-jt755"] Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.571478 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-dkjtq"] Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.613881 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-s75gb" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.674889 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-9335-account-create-update-r8gqv" Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.788930 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-3a2d-account-create-update-tdzcg"] Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.881297 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-jt755" event={"ID":"2636eac0-4a90-4144-9172-16b5bcc80ca4","Type":"ContainerStarted","Data":"15c2e10a42c88b7798a8ea9762ddb4837195bb4e39e2563522aa344c5853eeae"} Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.881339 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-3a2d-account-create-update-tdzcg" event={"ID":"31e642ba-e4e1-42e7-aa70-6fa045bbc379","Type":"ContainerStarted","Data":"5a3a4a4137e124f96762782f53a3901435e42a041b867278c369e6c80bfd2bf4"} Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.884089 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-dkjtq" event={"ID":"030c776d-b20a-4852-a19b-ccf67b8589b5","Type":"ContainerStarted","Data":"1ae004c51c8bc71d23cf602ea8a5bd5611aeddfa1f8041c0992f640329f1e0ca"} Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.888050 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"a2014dcb5213ccabf2e5a08d052c48b1fa541345bc969ed78a02b8acbd0b836e"} Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.888094 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"5bb02d057581590a07be4f6c68ca4784ff007688bf3a7b3a1009e0c9d6023107"} Nov 28 16:32:17 crc kubenswrapper[4954]: I1128 16:32:17.952012 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-d6b0-account-create-update-z5zkz"] Nov 28 16:32:17 crc kubenswrapper[4954]: W1128 16:32:17.960582 4954 manager.go:1169] 
Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode880dbc3_1219_4582_b093_96e007d91831.slice/crio-88b3ac1a6656b1f77b850b3851cbae60c38216665466b60fdc7e9c6c72f7fc41 WatchSource:0}: Error finding container 88b3ac1a6656b1f77b850b3851cbae60c38216665466b60fdc7e9c6c72f7fc41: Status 404 returned error can't find the container with id 88b3ac1a6656b1f77b850b3851cbae60c38216665466b60fdc7e9c6c72f7fc41 Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.020491 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-s75gb"] Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.073483 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-ntrfz"] Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.320100 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-9335-account-create-update-r8gqv"] Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.900490 4954 generic.go:334] "Generic (PLEG): container finished" podID="31e642ba-e4e1-42e7-aa70-6fa045bbc379" containerID="c7d6a657408296b360af63d4c2d29f78837207a3fc468b6abbbb429d313f11c9" exitCode=0 Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.900571 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-3a2d-account-create-update-tdzcg" event={"ID":"31e642ba-e4e1-42e7-aa70-6fa045bbc379","Type":"ContainerDied","Data":"c7d6a657408296b360af63d4c2d29f78837207a3fc468b6abbbb429d313f11c9"} Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.908633 4954 generic.go:334] "Generic (PLEG): container finished" podID="030c776d-b20a-4852-a19b-ccf67b8589b5" containerID="f7eddcd9f20ba7f9f2147ed350c853521c58d6d15dcfafe774c3baf1dca0680d" exitCode=0 Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.908694 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-dkjtq" event={"ID":"030c776d-b20a-4852-a19b-ccf67b8589b5","Type":"ContainerDied","Data":"f7eddcd9f20ba7f9f2147ed350c853521c58d6d15dcfafe774c3baf1dca0680d"} Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.910880 4954 generic.go:334] "Generic (PLEG): container finished" podID="e880dbc3-1219-4582-b093-96e007d91831" containerID="70ce2a58db6b12e9c1165d80ed4370774b1cb1db85fb532b8fc97a52c67cbe79" exitCode=0 Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.910956 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d6b0-account-create-update-z5zkz" event={"ID":"e880dbc3-1219-4582-b093-96e007d91831","Type":"ContainerDied","Data":"70ce2a58db6b12e9c1165d80ed4370774b1cb1db85fb532b8fc97a52c67cbe79"} Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.910994 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d6b0-account-create-update-z5zkz" event={"ID":"e880dbc3-1219-4582-b093-96e007d91831","Type":"ContainerStarted","Data":"88b3ac1a6656b1f77b850b3851cbae60c38216665466b60fdc7e9c6c72f7fc41"} Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.921197 4954 generic.go:334] "Generic (PLEG): container finished" podID="2636eac0-4a90-4144-9172-16b5bcc80ca4" containerID="c2b37521bc0b4d9273f142d48f21d904914bf907956e7cd8621e25815eee2dd4" exitCode=0 Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.921278 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-jt755" 
event={"ID":"2636eac0-4a90-4144-9172-16b5bcc80ca4","Type":"ContainerDied","Data":"c2b37521bc0b4d9273f142d48f21d904914bf907956e7cd8621e25815eee2dd4"} Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.923003 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-ntrfz" event={"ID":"032f188e-b87a-49fc-838f-f024006df4df","Type":"ContainerStarted","Data":"fe12dd7f6415c4158a03116f9ece0c6685af1e52eca9724b0df5c4c4b80ea20f"} Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.924961 4954 generic.go:334] "Generic (PLEG): container finished" podID="bb410d6d-f34f-4433-bab6-5a42c73408ab" containerID="e140efb0c7b25f4ec739eacce54c1dd0ea255bfae35614850ec69f60dbf6c5dc" exitCode=0 Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.925088 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-s75gb" event={"ID":"bb410d6d-f34f-4433-bab6-5a42c73408ab","Type":"ContainerDied","Data":"e140efb0c7b25f4ec739eacce54c1dd0ea255bfae35614850ec69f60dbf6c5dc"} Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.925148 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-s75gb" event={"ID":"bb410d6d-f34f-4433-bab6-5a42c73408ab","Type":"ContainerStarted","Data":"df2245fd5ea6fc8afc799eb60e8a534883635baee88962d2254187e67d3270a1"} Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.928198 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-9335-account-create-update-r8gqv" event={"ID":"988f8c45-0585-47e8-b9aa-c02c53c62c75","Type":"ContainerStarted","Data":"3b2e7c6f172b6a2ff64b3c07fb45f6a3eac2e679a49cb30045322a947be9c0a2"} Nov 28 16:32:18 crc kubenswrapper[4954]: I1128 16:32:18.928259 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-9335-account-create-update-r8gqv" event={"ID":"988f8c45-0585-47e8-b9aa-c02c53c62c75","Type":"ContainerStarted","Data":"e11d7c34c997561a1213865d6c5e3cea6f2c42c8e38be2aa416add23cb96b3b5"} Nov 28 16:32:19 crc kubenswrapper[4954]: I1128 16:32:19.006267 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-9335-account-create-update-r8gqv" podStartSLOduration=2.006246763 podStartE2EDuration="2.006246763s" podCreationTimestamp="2025-11-28 16:32:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:32:18.996467026 +0000 UTC m=+1292.388135587" watchObservedRunningTime="2025-11-28 16:32:19.006246763 +0000 UTC m=+1292.397915304" Nov 28 16:32:19 crc kubenswrapper[4954]: I1128 16:32:19.939740 4954 generic.go:334] "Generic (PLEG): container finished" podID="988f8c45-0585-47e8-b9aa-c02c53c62c75" containerID="3b2e7c6f172b6a2ff64b3c07fb45f6a3eac2e679a49cb30045322a947be9c0a2" exitCode=0 Nov 28 16:32:19 crc kubenswrapper[4954]: I1128 16:32:19.939824 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-9335-account-create-update-r8gqv" event={"ID":"988f8c45-0585-47e8-b9aa-c02c53c62c75","Type":"ContainerDied","Data":"3b2e7c6f172b6a2ff64b3c07fb45f6a3eac2e679a49cb30045322a947be9c0a2"} Nov 28 16:32:19 crc kubenswrapper[4954]: I1128 16:32:19.958386 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"201e26515a6013ab930eca8cf3032c3b5c434f99841737467e7187c6b2390519"} Nov 28 16:32:19 crc kubenswrapper[4954]: I1128 16:32:19.958459 4954 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"d21ef8c65b44b65b7b352925f975d0e931485db1bbb827c59654bc000dd7fc7f"} Nov 28 16:32:19 crc kubenswrapper[4954]: I1128 16:32:19.958472 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"aa502d58af9315bcf5906e11b75470ef748ed5deffc2e4f87f6a86e7decf16e6"} Nov 28 16:32:19 crc kubenswrapper[4954]: I1128 16:32:19.958482 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"e269517f2a950366081e2a4332d4c243079436ed72f1864b1a8bba98e71728ad"} Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.456106 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-dkjtq" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.538433 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/030c776d-b20a-4852-a19b-ccf67b8589b5-operator-scripts\") pod \"030c776d-b20a-4852-a19b-ccf67b8589b5\" (UID: \"030c776d-b20a-4852-a19b-ccf67b8589b5\") " Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.538646 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ll725\" (UniqueName: \"kubernetes.io/projected/030c776d-b20a-4852-a19b-ccf67b8589b5-kube-api-access-ll725\") pod \"030c776d-b20a-4852-a19b-ccf67b8589b5\" (UID: \"030c776d-b20a-4852-a19b-ccf67b8589b5\") " Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.568798 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/030c776d-b20a-4852-a19b-ccf67b8589b5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "030c776d-b20a-4852-a19b-ccf67b8589b5" (UID: "030c776d-b20a-4852-a19b-ccf67b8589b5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.583291 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/030c776d-b20a-4852-a19b-ccf67b8589b5-kube-api-access-ll725" (OuterVolumeSpecName: "kube-api-access-ll725") pod "030c776d-b20a-4852-a19b-ccf67b8589b5" (UID: "030c776d-b20a-4852-a19b-ccf67b8589b5"). InnerVolumeSpecName "kube-api-access-ll725". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.641209 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/030c776d-b20a-4852-a19b-ccf67b8589b5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.641586 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ll725\" (UniqueName: \"kubernetes.io/projected/030c776d-b20a-4852-a19b-ccf67b8589b5-kube-api-access-ll725\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.673012 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-3a2d-account-create-update-tdzcg" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.676668 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-s75gb" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.681791 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-jt755" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.692175 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-d6b0-account-create-update-z5zkz" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.742848 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wr7l2\" (UniqueName: \"kubernetes.io/projected/bb410d6d-f34f-4433-bab6-5a42c73408ab-kube-api-access-wr7l2\") pod \"bb410d6d-f34f-4433-bab6-5a42c73408ab\" (UID: \"bb410d6d-f34f-4433-bab6-5a42c73408ab\") " Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.742890 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lnzml\" (UniqueName: \"kubernetes.io/projected/31e642ba-e4e1-42e7-aa70-6fa045bbc379-kube-api-access-lnzml\") pod \"31e642ba-e4e1-42e7-aa70-6fa045bbc379\" (UID: \"31e642ba-e4e1-42e7-aa70-6fa045bbc379\") " Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.742930 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84mfr\" (UniqueName: \"kubernetes.io/projected/2636eac0-4a90-4144-9172-16b5bcc80ca4-kube-api-access-84mfr\") pod \"2636eac0-4a90-4144-9172-16b5bcc80ca4\" (UID: \"2636eac0-4a90-4144-9172-16b5bcc80ca4\") " Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.743001 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31e642ba-e4e1-42e7-aa70-6fa045bbc379-operator-scripts\") pod \"31e642ba-e4e1-42e7-aa70-6fa045bbc379\" (UID: \"31e642ba-e4e1-42e7-aa70-6fa045bbc379\") " Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.743067 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb410d6d-f34f-4433-bab6-5a42c73408ab-operator-scripts\") pod \"bb410d6d-f34f-4433-bab6-5a42c73408ab\" (UID: \"bb410d6d-f34f-4433-bab6-5a42c73408ab\") " Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.743151 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2636eac0-4a90-4144-9172-16b5bcc80ca4-operator-scripts\") pod \"2636eac0-4a90-4144-9172-16b5bcc80ca4\" (UID: \"2636eac0-4a90-4144-9172-16b5bcc80ca4\") " Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.743182 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2mhs\" (UniqueName: \"kubernetes.io/projected/e880dbc3-1219-4582-b093-96e007d91831-kube-api-access-q2mhs\") pod \"e880dbc3-1219-4582-b093-96e007d91831\" (UID: \"e880dbc3-1219-4582-b093-96e007d91831\") " Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.743256 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e880dbc3-1219-4582-b093-96e007d91831-operator-scripts\") pod \"e880dbc3-1219-4582-b093-96e007d91831\" (UID: \"e880dbc3-1219-4582-b093-96e007d91831\") " Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.745125 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/31e642ba-e4e1-42e7-aa70-6fa045bbc379-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "31e642ba-e4e1-42e7-aa70-6fa045bbc379" (UID: "31e642ba-e4e1-42e7-aa70-6fa045bbc379"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.745138 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e880dbc3-1219-4582-b093-96e007d91831-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e880dbc3-1219-4582-b093-96e007d91831" (UID: "e880dbc3-1219-4582-b093-96e007d91831"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.745731 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2636eac0-4a90-4144-9172-16b5bcc80ca4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2636eac0-4a90-4144-9172-16b5bcc80ca4" (UID: "2636eac0-4a90-4144-9172-16b5bcc80ca4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.746249 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bb410d6d-f34f-4433-bab6-5a42c73408ab-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bb410d6d-f34f-4433-bab6-5a42c73408ab" (UID: "bb410d6d-f34f-4433-bab6-5a42c73408ab"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.749895 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31e642ba-e4e1-42e7-aa70-6fa045bbc379-kube-api-access-lnzml" (OuterVolumeSpecName: "kube-api-access-lnzml") pod "31e642ba-e4e1-42e7-aa70-6fa045bbc379" (UID: "31e642ba-e4e1-42e7-aa70-6fa045bbc379"). InnerVolumeSpecName "kube-api-access-lnzml". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.750054 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb410d6d-f34f-4433-bab6-5a42c73408ab-kube-api-access-wr7l2" (OuterVolumeSpecName: "kube-api-access-wr7l2") pod "bb410d6d-f34f-4433-bab6-5a42c73408ab" (UID: "bb410d6d-f34f-4433-bab6-5a42c73408ab"). InnerVolumeSpecName "kube-api-access-wr7l2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.751474 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2636eac0-4a90-4144-9172-16b5bcc80ca4-kube-api-access-84mfr" (OuterVolumeSpecName: "kube-api-access-84mfr") pod "2636eac0-4a90-4144-9172-16b5bcc80ca4" (UID: "2636eac0-4a90-4144-9172-16b5bcc80ca4"). InnerVolumeSpecName "kube-api-access-84mfr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.760713 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e880dbc3-1219-4582-b093-96e007d91831-kube-api-access-q2mhs" (OuterVolumeSpecName: "kube-api-access-q2mhs") pod "e880dbc3-1219-4582-b093-96e007d91831" (UID: "e880dbc3-1219-4582-b093-96e007d91831"). InnerVolumeSpecName "kube-api-access-q2mhs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.847645 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e880dbc3-1219-4582-b093-96e007d91831-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.847733 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wr7l2\" (UniqueName: \"kubernetes.io/projected/bb410d6d-f34f-4433-bab6-5a42c73408ab-kube-api-access-wr7l2\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.847749 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lnzml\" (UniqueName: \"kubernetes.io/projected/31e642ba-e4e1-42e7-aa70-6fa045bbc379-kube-api-access-lnzml\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.847761 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84mfr\" (UniqueName: \"kubernetes.io/projected/2636eac0-4a90-4144-9172-16b5bcc80ca4-kube-api-access-84mfr\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.847772 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31e642ba-e4e1-42e7-aa70-6fa045bbc379-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.847785 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb410d6d-f34f-4433-bab6-5a42c73408ab-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.847798 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2636eac0-4a90-4144-9172-16b5bcc80ca4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.847810 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2mhs\" (UniqueName: \"kubernetes.io/projected/e880dbc3-1219-4582-b093-96e007d91831-kube-api-access-q2mhs\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.967092 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-d6b0-account-create-update-z5zkz" event={"ID":"e880dbc3-1219-4582-b093-96e007d91831","Type":"ContainerDied","Data":"88b3ac1a6656b1f77b850b3851cbae60c38216665466b60fdc7e9c6c72f7fc41"} Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.967132 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88b3ac1a6656b1f77b850b3851cbae60c38216665466b60fdc7e9c6c72f7fc41" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.967257 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-d6b0-account-create-update-z5zkz" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.970346 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-jt755" event={"ID":"2636eac0-4a90-4144-9172-16b5bcc80ca4","Type":"ContainerDied","Data":"15c2e10a42c88b7798a8ea9762ddb4837195bb4e39e2563522aa344c5853eeae"} Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.970389 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="15c2e10a42c88b7798a8ea9762ddb4837195bb4e39e2563522aa344c5853eeae" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.970391 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-jt755" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.972588 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-s75gb" event={"ID":"bb410d6d-f34f-4433-bab6-5a42c73408ab","Type":"ContainerDied","Data":"df2245fd5ea6fc8afc799eb60e8a534883635baee88962d2254187e67d3270a1"} Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.972616 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="df2245fd5ea6fc8afc799eb60e8a534883635baee88962d2254187e67d3270a1" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.972706 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-s75gb" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.976343 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-3a2d-account-create-update-tdzcg" event={"ID":"31e642ba-e4e1-42e7-aa70-6fa045bbc379","Type":"ContainerDied","Data":"5a3a4a4137e124f96762782f53a3901435e42a041b867278c369e6c80bfd2bf4"} Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.976385 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a3a4a4137e124f96762782f53a3901435e42a041b867278c369e6c80bfd2bf4" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.976351 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-3a2d-account-create-update-tdzcg" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.978640 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-dkjtq" Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.979248 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-dkjtq" event={"ID":"030c776d-b20a-4852-a19b-ccf67b8589b5","Type":"ContainerDied","Data":"1ae004c51c8bc71d23cf602ea8a5bd5611aeddfa1f8041c0992f640329f1e0ca"} Nov 28 16:32:20 crc kubenswrapper[4954]: I1128 16:32:20.979310 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ae004c51c8bc71d23cf602ea8a5bd5611aeddfa1f8041c0992f640329f1e0ca" Nov 28 16:32:23 crc kubenswrapper[4954]: I1128 16:32:23.195041 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-9335-account-create-update-r8gqv" Nov 28 16:32:23 crc kubenswrapper[4954]: I1128 16:32:23.288663 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9r6p\" (UniqueName: \"kubernetes.io/projected/988f8c45-0585-47e8-b9aa-c02c53c62c75-kube-api-access-s9r6p\") pod \"988f8c45-0585-47e8-b9aa-c02c53c62c75\" (UID: \"988f8c45-0585-47e8-b9aa-c02c53c62c75\") " Nov 28 16:32:23 crc kubenswrapper[4954]: I1128 16:32:23.288890 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/988f8c45-0585-47e8-b9aa-c02c53c62c75-operator-scripts\") pod \"988f8c45-0585-47e8-b9aa-c02c53c62c75\" (UID: \"988f8c45-0585-47e8-b9aa-c02c53c62c75\") " Nov 28 16:32:23 crc kubenswrapper[4954]: I1128 16:32:23.289586 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/988f8c45-0585-47e8-b9aa-c02c53c62c75-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "988f8c45-0585-47e8-b9aa-c02c53c62c75" (UID: "988f8c45-0585-47e8-b9aa-c02c53c62c75"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:32:23 crc kubenswrapper[4954]: I1128 16:32:23.294386 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/988f8c45-0585-47e8-b9aa-c02c53c62c75-kube-api-access-s9r6p" (OuterVolumeSpecName: "kube-api-access-s9r6p") pod "988f8c45-0585-47e8-b9aa-c02c53c62c75" (UID: "988f8c45-0585-47e8-b9aa-c02c53c62c75"). InnerVolumeSpecName "kube-api-access-s9r6p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:23 crc kubenswrapper[4954]: I1128 16:32:23.391132 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9r6p\" (UniqueName: \"kubernetes.io/projected/988f8c45-0585-47e8-b9aa-c02c53c62c75-kube-api-access-s9r6p\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:23 crc kubenswrapper[4954]: I1128 16:32:23.391166 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/988f8c45-0585-47e8-b9aa-c02c53c62c75-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:24 crc kubenswrapper[4954]: I1128 16:32:24.007374 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-9335-account-create-update-r8gqv" event={"ID":"988f8c45-0585-47e8-b9aa-c02c53c62c75","Type":"ContainerDied","Data":"e11d7c34c997561a1213865d6c5e3cea6f2c42c8e38be2aa416add23cb96b3b5"} Nov 28 16:32:24 crc kubenswrapper[4954]: I1128 16:32:24.007416 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e11d7c34c997561a1213865d6c5e3cea6f2c42c8e38be2aa416add23cb96b3b5" Nov 28 16:32:24 crc kubenswrapper[4954]: I1128 16:32:24.007434 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-9335-account-create-update-r8gqv" Nov 28 16:32:26 crc kubenswrapper[4954]: I1128 16:32:26.026224 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-6kcgd" event={"ID":"1e50d182-9126-4ef1-a377-8620e0357d4d","Type":"ContainerStarted","Data":"aa31d4b7c23c133e9bd78b541ca96d27bc06d352ed360aed9ac2990e051d421b"} Nov 28 16:32:26 crc kubenswrapper[4954]: I1128 16:32:26.033081 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"dcc789be8b7c6143065659690f57fb8fb47dcf73a3d5b9f4d2416eef216a83d6"} Nov 28 16:32:26 crc kubenswrapper[4954]: I1128 16:32:26.033283 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"3a8c57cddf7184457522309c1f75ea85035dd861fa97da4dc3572e601c38586a"} Nov 28 16:32:26 crc kubenswrapper[4954]: I1128 16:32:26.033378 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"d6384cb554ea3695e28b1473c8d2c8cabce3a920e913bc674060bf33f2b8c9a0"} Nov 28 16:32:26 crc kubenswrapper[4954]: I1128 16:32:26.034881 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-ntrfz" event={"ID":"032f188e-b87a-49fc-838f-f024006df4df","Type":"ContainerStarted","Data":"3ea77e957a4e39b75500e11e0d178d493dd4efdd5e75e1095f6f7266e8af1383"} Nov 28 16:32:26 crc kubenswrapper[4954]: I1128 16:32:26.046079 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-6kcgd" podStartSLOduration=4.20909022 podStartE2EDuration="37.04605842s" podCreationTimestamp="2025-11-28 16:31:49 +0000 UTC" firstStartedPulling="2025-11-28 16:31:52.44123842 +0000 UTC m=+1265.832906961" lastFinishedPulling="2025-11-28 16:32:25.27820662 +0000 UTC m=+1298.669875161" observedRunningTime="2025-11-28 16:32:26.044490881 +0000 UTC m=+1299.436159432" watchObservedRunningTime="2025-11-28 16:32:26.04605842 +0000 UTC m=+1299.437726971" Nov 28 16:32:26 crc kubenswrapper[4954]: I1128 16:32:26.063092 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-ntrfz" podStartSLOduration=2.87360112 podStartE2EDuration="10.063075078s" podCreationTimestamp="2025-11-28 16:32:16 +0000 UTC" firstStartedPulling="2025-11-28 16:32:18.087691259 +0000 UTC m=+1291.479359800" lastFinishedPulling="2025-11-28 16:32:25.277165217 +0000 UTC m=+1298.668833758" observedRunningTime="2025-11-28 16:32:26.061823898 +0000 UTC m=+1299.453492439" watchObservedRunningTime="2025-11-28 16:32:26.063075078 +0000 UTC m=+1299.454743619" Nov 28 16:32:27 crc kubenswrapper[4954]: I1128 16:32:27.118279 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"b3f9dee958ec0b21d8a91a7dab2c06039dfd85f545f18ca89dd9084c1a7e3b93"} Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.132407 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"f3f394c2aab82db3f2e4a7ff59593262ff98b913c3e8a66fa7b1fc990d8e2ded"} Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.132779 4954 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"ec66bc366041d8589f9f9c599f24a864c0e0bb563e8e92223216a3166545a6a1"} Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.132792 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerStarted","Data":"a2bca9bc4ada3b3fc6bc0382c9d6662693c39fd5f31277e3ff70afcea99f052c"} Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.169722 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=37.060909299 podStartE2EDuration="47.169701646s" podCreationTimestamp="2025-11-28 16:31:41 +0000 UTC" firstStartedPulling="2025-11-28 16:32:15.168165914 +0000 UTC m=+1288.559834455" lastFinishedPulling="2025-11-28 16:32:25.276958251 +0000 UTC m=+1298.668626802" observedRunningTime="2025-11-28 16:32:28.168634462 +0000 UTC m=+1301.560303003" watchObservedRunningTime="2025-11-28 16:32:28.169701646 +0000 UTC m=+1301.561370187" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.457384 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-hcg7c"] Nov 28 16:32:28 crc kubenswrapper[4954]: E1128 16:32:28.457716 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="988f8c45-0585-47e8-b9aa-c02c53c62c75" containerName="mariadb-account-create-update" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.457733 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="988f8c45-0585-47e8-b9aa-c02c53c62c75" containerName="mariadb-account-create-update" Nov 28 16:32:28 crc kubenswrapper[4954]: E1128 16:32:28.457748 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb410d6d-f34f-4433-bab6-5a42c73408ab" containerName="mariadb-database-create" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.457755 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb410d6d-f34f-4433-bab6-5a42c73408ab" containerName="mariadb-database-create" Nov 28 16:32:28 crc kubenswrapper[4954]: E1128 16:32:28.457775 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e880dbc3-1219-4582-b093-96e007d91831" containerName="mariadb-account-create-update" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.457783 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="e880dbc3-1219-4582-b093-96e007d91831" containerName="mariadb-account-create-update" Nov 28 16:32:28 crc kubenswrapper[4954]: E1128 16:32:28.457792 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2636eac0-4a90-4144-9172-16b5bcc80ca4" containerName="mariadb-database-create" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.457799 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="2636eac0-4a90-4144-9172-16b5bcc80ca4" containerName="mariadb-database-create" Nov 28 16:32:28 crc kubenswrapper[4954]: E1128 16:32:28.457813 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="030c776d-b20a-4852-a19b-ccf67b8589b5" containerName="mariadb-database-create" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.457820 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="030c776d-b20a-4852-a19b-ccf67b8589b5" containerName="mariadb-database-create" Nov 28 16:32:28 crc kubenswrapper[4954]: E1128 16:32:28.457831 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31e642ba-e4e1-42e7-aa70-6fa045bbc379" 
containerName="mariadb-account-create-update" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.457837 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="31e642ba-e4e1-42e7-aa70-6fa045bbc379" containerName="mariadb-account-create-update" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.457980 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="030c776d-b20a-4852-a19b-ccf67b8589b5" containerName="mariadb-database-create" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.458003 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="988f8c45-0585-47e8-b9aa-c02c53c62c75" containerName="mariadb-account-create-update" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.458014 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="31e642ba-e4e1-42e7-aa70-6fa045bbc379" containerName="mariadb-account-create-update" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.458021 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="2636eac0-4a90-4144-9172-16b5bcc80ca4" containerName="mariadb-database-create" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.458032 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb410d6d-f34f-4433-bab6-5a42c73408ab" containerName="mariadb-database-create" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.458043 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="e880dbc3-1219-4582-b093-96e007d91831" containerName="mariadb-account-create-update" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.458857 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.462094 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.483585 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-hcg7c"] Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.583759 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.583825 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.583854 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.583877 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-config\") pod 
\"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.583913 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.584003 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2jt2\" (UniqueName: \"kubernetes.io/projected/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-kube-api-access-v2jt2\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.685406 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2jt2\" (UniqueName: \"kubernetes.io/projected/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-kube-api-access-v2jt2\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.685516 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.685591 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.685611 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.685628 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-config\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.685656 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.686647 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: 
\"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.687229 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.687801 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.688173 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.690198 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-config\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.707647 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2jt2\" (UniqueName: \"kubernetes.io/projected/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-kube-api-access-v2jt2\") pod \"dnsmasq-dns-5c79d794d7-hcg7c\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") " pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:28 crc kubenswrapper[4954]: I1128 16:32:28.778909 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:29 crc kubenswrapper[4954]: I1128 16:32:29.235151 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-hcg7c"] Nov 28 16:32:30 crc kubenswrapper[4954]: I1128 16:32:30.150900 4954 generic.go:334] "Generic (PLEG): container finished" podID="4d5f1005-9b04-4e6b-9b73-b7ec83b4a990" containerID="b55e0a2a280f6d2929b8103dd3e424f6354d0b938ccc43616f9454a7ff40cbff" exitCode=0 Nov 28 16:32:30 crc kubenswrapper[4954]: I1128 16:32:30.151037 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" event={"ID":"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990","Type":"ContainerDied","Data":"b55e0a2a280f6d2929b8103dd3e424f6354d0b938ccc43616f9454a7ff40cbff"} Nov 28 16:32:30 crc kubenswrapper[4954]: I1128 16:32:30.151269 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" event={"ID":"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990","Type":"ContainerStarted","Data":"74bbe490c7a44f116fd41f716ea7ac4450163616e3d8b4aeac9f1aa555706d7b"} Nov 28 16:32:31 crc kubenswrapper[4954]: I1128 16:32:31.163340 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" event={"ID":"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990","Type":"ContainerStarted","Data":"95ad2c8cdb16a956277ad78404c8589fdf671c65171d5ff54be69e302db1c281"} Nov 28 16:32:31 crc kubenswrapper[4954]: I1128 16:32:31.163841 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" Nov 28 16:32:31 crc kubenswrapper[4954]: I1128 16:32:31.167999 4954 generic.go:334] "Generic (PLEG): container finished" podID="032f188e-b87a-49fc-838f-f024006df4df" containerID="3ea77e957a4e39b75500e11e0d178d493dd4efdd5e75e1095f6f7266e8af1383" exitCode=0 Nov 28 16:32:31 crc kubenswrapper[4954]: I1128 16:32:31.168055 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-ntrfz" event={"ID":"032f188e-b87a-49fc-838f-f024006df4df","Type":"ContainerDied","Data":"3ea77e957a4e39b75500e11e0d178d493dd4efdd5e75e1095f6f7266e8af1383"} Nov 28 16:32:31 crc kubenswrapper[4954]: I1128 16:32:31.187875 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" podStartSLOduration=3.187855843 podStartE2EDuration="3.187855843s" podCreationTimestamp="2025-11-28 16:32:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:32:31.186907963 +0000 UTC m=+1304.578576504" watchObservedRunningTime="2025-11-28 16:32:31.187855843 +0000 UTC m=+1304.579524384" Nov 28 16:32:32 crc kubenswrapper[4954]: I1128 16:32:32.528069 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-ntrfz" Nov 28 16:32:32 crc kubenswrapper[4954]: I1128 16:32:32.655192 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c76dr\" (UniqueName: \"kubernetes.io/projected/032f188e-b87a-49fc-838f-f024006df4df-kube-api-access-c76dr\") pod \"032f188e-b87a-49fc-838f-f024006df4df\" (UID: \"032f188e-b87a-49fc-838f-f024006df4df\") " Nov 28 16:32:32 crc kubenswrapper[4954]: I1128 16:32:32.655311 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/032f188e-b87a-49fc-838f-f024006df4df-config-data\") pod \"032f188e-b87a-49fc-838f-f024006df4df\" (UID: \"032f188e-b87a-49fc-838f-f024006df4df\") " Nov 28 16:32:32 crc kubenswrapper[4954]: I1128 16:32:32.655501 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/032f188e-b87a-49fc-838f-f024006df4df-combined-ca-bundle\") pod \"032f188e-b87a-49fc-838f-f024006df4df\" (UID: \"032f188e-b87a-49fc-838f-f024006df4df\") " Nov 28 16:32:32 crc kubenswrapper[4954]: I1128 16:32:32.660768 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/032f188e-b87a-49fc-838f-f024006df4df-kube-api-access-c76dr" (OuterVolumeSpecName: "kube-api-access-c76dr") pod "032f188e-b87a-49fc-838f-f024006df4df" (UID: "032f188e-b87a-49fc-838f-f024006df4df"). InnerVolumeSpecName "kube-api-access-c76dr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:32 crc kubenswrapper[4954]: I1128 16:32:32.683364 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/032f188e-b87a-49fc-838f-f024006df4df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "032f188e-b87a-49fc-838f-f024006df4df" (UID: "032f188e-b87a-49fc-838f-f024006df4df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:32 crc kubenswrapper[4954]: I1128 16:32:32.715608 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/032f188e-b87a-49fc-838f-f024006df4df-config-data" (OuterVolumeSpecName: "config-data") pod "032f188e-b87a-49fc-838f-f024006df4df" (UID: "032f188e-b87a-49fc-838f-f024006df4df"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:32 crc kubenswrapper[4954]: I1128 16:32:32.757764 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c76dr\" (UniqueName: \"kubernetes.io/projected/032f188e-b87a-49fc-838f-f024006df4df-kube-api-access-c76dr\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:32 crc kubenswrapper[4954]: I1128 16:32:32.757807 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/032f188e-b87a-49fc-838f-f024006df4df-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:32 crc kubenswrapper[4954]: I1128 16:32:32.757822 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/032f188e-b87a-49fc-838f-f024006df4df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.196444 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-ntrfz" event={"ID":"032f188e-b87a-49fc-838f-f024006df4df","Type":"ContainerDied","Data":"fe12dd7f6415c4158a03116f9ece0c6685af1e52eca9724b0df5c4c4b80ea20f"} Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.196760 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe12dd7f6415c4158a03116f9ece0c6685af1e52eca9724b0df5c4c4b80ea20f" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.196653 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-ntrfz" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.506134 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-hcg7c"] Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.506399 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" podUID="4d5f1005-9b04-4e6b-9b73-b7ec83b4a990" containerName="dnsmasq-dns" containerID="cri-o://95ad2c8cdb16a956277ad78404c8589fdf671c65171d5ff54be69e302db1c281" gracePeriod=10 Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.538919 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-42f68"] Nov 28 16:32:33 crc kubenswrapper[4954]: E1128 16:32:33.541231 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="032f188e-b87a-49fc-838f-f024006df4df" containerName="keystone-db-sync" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.541266 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="032f188e-b87a-49fc-838f-f024006df4df" containerName="keystone-db-sync" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.541568 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="032f188e-b87a-49fc-838f-f024006df4df" containerName="keystone-db-sync" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.544338 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.555895 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.555965 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.555918 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-tvqk5" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.556330 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.556340 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.574649 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-42f68"] Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.673163 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-config-data\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.673212 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-scripts\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.673243 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-credential-keys\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.673313 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-fernet-keys\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.673341 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-combined-ca-bundle\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.673364 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqq2v\" (UniqueName: \"kubernetes.io/projected/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-kube-api-access-vqq2v\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.690274 4954 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/dnsmasq-dns-5b868669f-d4l6p"] Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.692033 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.707689 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-d4l6p"] Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.774608 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.774662 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.774810 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-fernet-keys\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.774864 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njtwp\" (UniqueName: \"kubernetes.io/projected/7e9472a3-9824-414b-aba8-a5249d70f046-kube-api-access-njtwp\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.774929 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.774950 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-combined-ca-bundle\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.774976 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-config\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.775024 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqq2v\" (UniqueName: \"kubernetes.io/projected/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-kube-api-access-vqq2v\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " 
pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.775173 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-config-data\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.775253 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-dns-svc\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.775301 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-scripts\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.775350 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-credential-keys\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.797234 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-credential-keys\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.800476 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-combined-ca-bundle\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.801154 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-config-data\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.808081 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-scripts\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.816054 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-fernet-keys\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.816402 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqq2v\" 
(UniqueName: \"kubernetes.io/projected/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-kube-api-access-vqq2v\") pod \"keystone-bootstrap-42f68\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.878721 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-dns-svc\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.878837 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.878859 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.878918 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njtwp\" (UniqueName: \"kubernetes.io/projected/7e9472a3-9824-414b-aba8-a5249d70f046-kube-api-access-njtwp\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.878964 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.878989 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-config\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.880122 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-config\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.880290 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.881408 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-dns-svc\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.890062 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.890881 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.909661 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.932337 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njtwp\" (UniqueName: \"kubernetes.io/projected/7e9472a3-9824-414b-aba8-a5249d70f046-kube-api-access-njtwp\") pod \"dnsmasq-dns-5b868669f-d4l6p\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") " pod="openstack/dnsmasq-dns-5b868669f-d4l6p" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.947884 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.963828 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.963928 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.969110 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-v7lkp"] Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.970198 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.981971 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.985230 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-474mt"
Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.985503 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.985618 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.985916 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.994345 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-scripts\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.994577 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjbx2\" (UniqueName: \"kubernetes.io/projected/b8d4de09-a431-4eae-9ac8-23a33e1039b4-kube-api-access-zjbx2\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.994700 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-config-data\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.994769 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8d4de09-a431-4eae-9ac8-23a33e1039b4-log-httpd\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.994828 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8d4de09-a431-4eae-9ac8-23a33e1039b4-run-httpd\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.994910 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:33 crc kubenswrapper[4954]: I1128 16:32:33.994973 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.012759 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-sqc29"]
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.025816 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-sqc29"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.033016 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-7vnbv"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.033250 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.056655 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-d4l6p"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.068467 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-dblvm"]
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.077251 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-dblvm"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.084655 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-lrht9"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.084983 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.085180 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.097747 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.097791 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.097817 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-scripts\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.097860 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c10c6844-a43f-4167-90af-673636f4006b-db-sync-config-data\") pod \"barbican-db-sync-sqc29\" (UID: \"c10c6844-a43f-4167-90af-673636f4006b\") " pod="openstack/barbican-db-sync-sqc29"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.097892 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-combined-ca-bundle\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.097936 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-etc-machine-id\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.097956 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-config-data\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.098022 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzstd\" (UniqueName: \"kubernetes.io/projected/c10c6844-a43f-4167-90af-673636f4006b-kube-api-access-tzstd\") pod \"barbican-db-sync-sqc29\" (UID: \"c10c6844-a43f-4167-90af-673636f4006b\") " pod="openstack/barbican-db-sync-sqc29"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.098046 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-scripts\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.098074 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjbx2\" (UniqueName: \"kubernetes.io/projected/b8d4de09-a431-4eae-9ac8-23a33e1039b4-kube-api-access-zjbx2\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.098105 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-db-sync-config-data\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.098125 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-config-data\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.098155 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8d4de09-a431-4eae-9ac8-23a33e1039b4-log-httpd\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.098173 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8d4de09-a431-4eae-9ac8-23a33e1039b4-run-httpd\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.098190 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wpc9\" (UniqueName: \"kubernetes.io/projected/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-kube-api-access-7wpc9\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.098217 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10c6844-a43f-4167-90af-673636f4006b-combined-ca-bundle\") pod \"barbican-db-sync-sqc29\" (UID: \"c10c6844-a43f-4167-90af-673636f4006b\") " pod="openstack/barbican-db-sync-sqc29"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.107677 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8d4de09-a431-4eae-9ac8-23a33e1039b4-run-httpd\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.108053 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8d4de09-a431-4eae-9ac8-23a33e1039b4-log-httpd\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.117162 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-scripts\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.120260 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-config-data\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.120898 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.121151 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.149758 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-sqc29"]
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.168815 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjbx2\" (UniqueName: \"kubernetes.io/projected/b8d4de09-a431-4eae-9ac8-23a33e1039b4-kube-api-access-zjbx2\") pod \"ceilometer-0\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") " pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.188670 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-v7lkp"]
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.199517 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10c6844-a43f-4167-90af-673636f4006b-combined-ca-bundle\") pod \"barbican-db-sync-sqc29\" (UID: \"c10c6844-a43f-4167-90af-673636f4006b\") " pod="openstack/barbican-db-sync-sqc29"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.199625 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c10c6844-a43f-4167-90af-673636f4006b-db-sync-config-data\") pod \"barbican-db-sync-sqc29\" (UID: \"c10c6844-a43f-4167-90af-673636f4006b\") " pod="openstack/barbican-db-sync-sqc29"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.199651 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-scripts\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.199684 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e78f971d-0af3-4789-a73a-b57790eb3dfa-combined-ca-bundle\") pod \"neutron-db-sync-dblvm\" (UID: \"e78f971d-0af3-4789-a73a-b57790eb3dfa\") " pod="openstack/neutron-db-sync-dblvm"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.199708 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-combined-ca-bundle\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.199754 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vw7k4\" (UniqueName: \"kubernetes.io/projected/e78f971d-0af3-4789-a73a-b57790eb3dfa-kube-api-access-vw7k4\") pod \"neutron-db-sync-dblvm\" (UID: \"e78f971d-0af3-4789-a73a-b57790eb3dfa\") " pod="openstack/neutron-db-sync-dblvm"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.199820 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-etc-machine-id\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.199855 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-config-data\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.199903 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzstd\" (UniqueName: \"kubernetes.io/projected/c10c6844-a43f-4167-90af-673636f4006b-kube-api-access-tzstd\") pod \"barbican-db-sync-sqc29\" (UID: \"c10c6844-a43f-4167-90af-673636f4006b\") " pod="openstack/barbican-db-sync-sqc29"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.199946 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e78f971d-0af3-4789-a73a-b57790eb3dfa-config\") pod \"neutron-db-sync-dblvm\" (UID: \"e78f971d-0af3-4789-a73a-b57790eb3dfa\") " pod="openstack/neutron-db-sync-dblvm"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.199970 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-db-sync-config-data\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.199997 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wpc9\" (UniqueName: \"kubernetes.io/projected/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-kube-api-access-7wpc9\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.202040 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-etc-machine-id\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.202491 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-dblvm"]
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.207139 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-combined-ca-bundle\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.207511 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-config-data\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.207806 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c10c6844-a43f-4167-90af-673636f4006b-db-sync-config-data\") pod \"barbican-db-sync-sqc29\" (UID: \"c10c6844-a43f-4167-90af-673636f4006b\") " pod="openstack/barbican-db-sync-sqc29"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.207878 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-db-sync-config-data\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.208953 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-scripts\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.209516 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10c6844-a43f-4167-90af-673636f4006b-combined-ca-bundle\") pod \"barbican-db-sync-sqc29\" (UID: \"c10c6844-a43f-4167-90af-673636f4006b\") " pod="openstack/barbican-db-sync-sqc29"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.228266 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wpc9\" (UniqueName: \"kubernetes.io/projected/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-kube-api-access-7wpc9\") pod \"cinder-db-sync-v7lkp\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") " pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.229824 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzstd\" (UniqueName: \"kubernetes.io/projected/c10c6844-a43f-4167-90af-673636f4006b-kube-api-access-tzstd\") pod \"barbican-db-sync-sqc29\" (UID: \"c10c6844-a43f-4167-90af-673636f4006b\") " pod="openstack/barbican-db-sync-sqc29"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.240913 4954 generic.go:334] "Generic (PLEG): container finished" podID="4d5f1005-9b04-4e6b-9b73-b7ec83b4a990" containerID="95ad2c8cdb16a956277ad78404c8589fdf671c65171d5ff54be69e302db1c281" exitCode=0
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.240950 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" event={"ID":"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990","Type":"ContainerDied","Data":"95ad2c8cdb16a956277ad78404c8589fdf671c65171d5ff54be69e302db1c281"}
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.261810 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-d4l6p"]
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.284607 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-p99dx"]
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.290109 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.293426 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-8b65x"]
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.296713 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.296829 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.297032 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-hdtbn"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.297426 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.302435 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e78f971d-0af3-4789-a73a-b57790eb3dfa-combined-ca-bundle\") pod \"neutron-db-sync-dblvm\" (UID: \"e78f971d-0af3-4789-a73a-b57790eb3dfa\") " pod="openstack/neutron-db-sync-dblvm"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.309081 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vw7k4\" (UniqueName: \"kubernetes.io/projected/e78f971d-0af3-4789-a73a-b57790eb3dfa-kube-api-access-vw7k4\") pod \"neutron-db-sync-dblvm\" (UID: \"e78f971d-0af3-4789-a73a-b57790eb3dfa\") " pod="openstack/neutron-db-sync-dblvm"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.311324 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e78f971d-0af3-4789-a73a-b57790eb3dfa-config\") pod \"neutron-db-sync-dblvm\" (UID: \"e78f971d-0af3-4789-a73a-b57790eb3dfa\") " pod="openstack/neutron-db-sync-dblvm"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.316848 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-p99dx"]
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.316961 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e78f971d-0af3-4789-a73a-b57790eb3dfa-combined-ca-bundle\") pod \"neutron-db-sync-dblvm\" (UID: \"e78f971d-0af3-4789-a73a-b57790eb3dfa\") " pod="openstack/neutron-db-sync-dblvm"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.325796 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-8b65x"]
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.328382 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e78f971d-0af3-4789-a73a-b57790eb3dfa-config\") pod \"neutron-db-sync-dblvm\" (UID: \"e78f971d-0af3-4789-a73a-b57790eb3dfa\") " pod="openstack/neutron-db-sync-dblvm"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.348128 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vw7k4\" (UniqueName: \"kubernetes.io/projected/e78f971d-0af3-4789-a73a-b57790eb3dfa-kube-api-access-vw7k4\") pod \"neutron-db-sync-dblvm\" (UID: \"e78f971d-0af3-4789-a73a-b57790eb3dfa\") " pod="openstack/neutron-db-sync-dblvm"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.357288 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.375497 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-v7lkp"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.412533 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-config\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.412579 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-config-data\") pod \"placement-db-sync-p99dx\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.412601 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-combined-ca-bundle\") pod \"placement-db-sync-p99dx\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.412631 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4rdw\" (UniqueName: \"kubernetes.io/projected/0d145bb5-4e17-4345-b697-a0273aef5f49-kube-api-access-r4rdw\") pod \"placement-db-sync-p99dx\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.412657 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-dns-svc\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.412689 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sr8k9\" (UniqueName: \"kubernetes.io/projected/d78cf560-4e31-47ac-a860-90a506397699-kube-api-access-sr8k9\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.412714 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.412770 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.412798 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-scripts\") pod \"placement-db-sync-p99dx\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.412894 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d145bb5-4e17-4345-b697-a0273aef5f49-logs\") pod \"placement-db-sync-p99dx\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.413483 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.443230 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-sqc29"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.474032 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-dblvm"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.514668 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-config\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.514715 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-config-data\") pod \"placement-db-sync-p99dx\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.514730 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-combined-ca-bundle\") pod \"placement-db-sync-p99dx\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.514760 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4rdw\" (UniqueName: \"kubernetes.io/projected/0d145bb5-4e17-4345-b697-a0273aef5f49-kube-api-access-r4rdw\") pod \"placement-db-sync-p99dx\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.514788 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-dns-svc\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.514818 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sr8k9\" (UniqueName: \"kubernetes.io/projected/d78cf560-4e31-47ac-a860-90a506397699-kube-api-access-sr8k9\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.514838 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.514872 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.514896 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-scripts\") pod \"placement-db-sync-p99dx\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.514930 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d145bb5-4e17-4345-b697-a0273aef5f49-logs\") pod \"placement-db-sync-p99dx\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.514998 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.516004 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.516675 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-config\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.518529 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.518660 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-dns-svc\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.519108 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d145bb5-4e17-4345-b697-a0273aef5f49-logs\") pod \"placement-db-sync-p99dx\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.519198 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.522105 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-config-data\") pod \"placement-db-sync-p99dx\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.523617 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-combined-ca-bundle\") pod \"placement-db-sync-p99dx\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.524664 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-scripts\") pod \"placement-db-sync-p99dx\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.535890 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sr8k9\" (UniqueName: \"kubernetes.io/projected/d78cf560-4e31-47ac-a860-90a506397699-kube-api-access-sr8k9\") pod \"dnsmasq-dns-cf78879c9-8b65x\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") " pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.552703 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4rdw\" (UniqueName: \"kubernetes.io/projected/0d145bb5-4e17-4345-b697-a0273aef5f49-kube-api-access-r4rdw\") pod \"placement-db-sync-p99dx\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.590319 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.637021 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-p99dx"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.648695 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.716202 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-d4l6p"]
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.717935 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-ovsdbserver-nb\") pod \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") "
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.717966 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-dns-svc\") pod \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") "
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.718077 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2jt2\" (UniqueName: \"kubernetes.io/projected/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-kube-api-access-v2jt2\") pod \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") "
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.718120 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-ovsdbserver-sb\") pod \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") "
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.718185 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-config\") pod \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") "
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.718273 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-dns-swift-storage-0\") pod \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\" (UID: \"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990\") "
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.724335 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-42f68"]
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.744708 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-kube-api-access-v2jt2" (OuterVolumeSpecName: "kube-api-access-v2jt2") pod "4d5f1005-9b04-4e6b-9b73-b7ec83b4a990" (UID: "4d5f1005-9b04-4e6b-9b73-b7ec83b4a990"). InnerVolumeSpecName "kube-api-access-v2jt2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.821783 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2jt2\" (UniqueName: \"kubernetes.io/projected/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-kube-api-access-v2jt2\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.855024 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4d5f1005-9b04-4e6b-9b73-b7ec83b4a990" (UID: "4d5f1005-9b04-4e6b-9b73-b7ec83b4a990"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.870838 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4d5f1005-9b04-4e6b-9b73-b7ec83b4a990" (UID: "4d5f1005-9b04-4e6b-9b73-b7ec83b4a990"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.876166 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4d5f1005-9b04-4e6b-9b73-b7ec83b4a990" (UID: "4d5f1005-9b04-4e6b-9b73-b7ec83b4a990"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.884699 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4d5f1005-9b04-4e6b-9b73-b7ec83b4a990" (UID: "4d5f1005-9b04-4e6b-9b73-b7ec83b4a990"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.902529 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-config" (OuterVolumeSpecName: "config") pod "4d5f1005-9b04-4e6b-9b73-b7ec83b4a990" (UID: "4d5f1005-9b04-4e6b-9b73-b7ec83b4a990"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.924442 4954 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.927113 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.927156 4954 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.927169 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.927182 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:34 crc kubenswrapper[4954]: I1128 16:32:34.949631 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 16:32:34 crc kubenswrapper[4954]: W1128 16:32:34.969208 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb8d4de09_a431_4eae_9ac8_23a33e1039b4.slice/crio-6e5406c54fffe7c2c114e96589ca2f0567bfafe41bcd2f68331983059117b6da WatchSource:0}: Error finding container 6e5406c54fffe7c2c114e96589ca2f0567bfafe41bcd2f68331983059117b6da: Status 404 returned error can't find the container with id 6e5406c54fffe7c2c114e96589ca2f0567bfafe41bcd2f68331983059117b6da
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.064651 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-v7lkp"]
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.105934 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-sqc29"]
Nov 28 16:32:35 crc kubenswrapper[4954]: W1128 16:32:35.121503 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc10c6844_a43f_4167_90af_673636f4006b.slice/crio-e770cb873bbd65b1dce1e0071eb105ac6e5d38cdff0ac8d9f0cd6562070a4581 WatchSource:0}: Error finding container e770cb873bbd65b1dce1e0071eb105ac6e5d38cdff0ac8d9f0cd6562070a4581: Status 404 returned error can't find the container with id e770cb873bbd65b1dce1e0071eb105ac6e5d38cdff0ac8d9f0cd6562070a4581
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.205050 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-dblvm"]
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.257900 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sqc29" event={"ID":"c10c6844-a43f-4167-90af-673636f4006b","Type":"ContainerStarted","Data":"e770cb873bbd65b1dce1e0071eb105ac6e5d38cdff0ac8d9f0cd6562070a4581"}
Nov 28 16:32:35 crc kubenswrapper[4954]: W1128 16:32:35.258733 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode78f971d_0af3_4789_a73a_b57790eb3dfa.slice/crio-7dec7c1deaf936f88a2ffc6a4678ca1a783251b85e4d0131fe27418b1d22d287 WatchSource:0}: Error finding container 7dec7c1deaf936f88a2ffc6a4678ca1a783251b85e4d0131fe27418b1d22d287: Status 404 returned error can't find the container with id 7dec7c1deaf936f88a2ffc6a4678ca1a783251b85e4d0131fe27418b1d22d287
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.262931 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-42f68" event={"ID":"274be6d5-d1d3-4c31-a3d1-d32a173a07c6","Type":"ContainerStarted","Data":"12f438b6bdc2aca45e92761c09020e8d83c9a7918f7f3aa86649cd411ca7a04c"}
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.262973 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-42f68" event={"ID":"274be6d5-d1d3-4c31-a3d1-d32a173a07c6","Type":"ContainerStarted","Data":"d50bc3ab265e65f7510d60674edde377ff7da527b9a43bf5231c2840518b2aff"}
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.267775 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8d4de09-a431-4eae-9ac8-23a33e1039b4","Type":"ContainerStarted","Data":"6e5406c54fffe7c2c114e96589ca2f0567bfafe41bcd2f68331983059117b6da"}
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.280256 4954 generic.go:334] "Generic (PLEG): container finished" podID="7e9472a3-9824-414b-aba8-a5249d70f046" containerID="eede0650f5565b5a4717ccb03b92efc80517eb486cb6e156c7edf17a383ffdba" exitCode=0
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.280329 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-d4l6p" event={"ID":"7e9472a3-9824-414b-aba8-a5249d70f046","Type":"ContainerDied","Data":"eede0650f5565b5a4717ccb03b92efc80517eb486cb6e156c7edf17a383ffdba"}
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.280357 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-d4l6p" event={"ID":"7e9472a3-9824-414b-aba8-a5249d70f046","Type":"ContainerStarted","Data":"e94d7ecf78a8afea131b3267105c84c35bbd5c1508604e7713c79231333ed01a"}
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.284294 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-p99dx"]
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.289991 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c"
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.290050 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-hcg7c" event={"ID":"4d5f1005-9b04-4e6b-9b73-b7ec83b4a990","Type":"ContainerDied","Data":"74bbe490c7a44f116fd41f716ea7ac4450163616e3d8b4aeac9f1aa555706d7b"}
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.290114 4954 scope.go:117] "RemoveContainer" containerID="95ad2c8cdb16a956277ad78404c8589fdf671c65171d5ff54be69e302db1c281"
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.299410 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-v7lkp" event={"ID":"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e","Type":"ContainerStarted","Data":"68cdbb71e9706db4a377c86beb7c71c5f22e9eb11e855e0014eb791f583c2f7a"}
Nov 28 16:32:35 crc kubenswrapper[4954]: W1128 16:32:35.305659 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d145bb5_4e17_4345_b697_a0273aef5f49.slice/crio-98cf4f476aed7115043c57ca4407e61dbb64cc571bdb4c70482676e82dcdd962 WatchSource:0}: Error finding container 98cf4f476aed7115043c57ca4407e61dbb64cc571bdb4c70482676e82dcdd962: Status 404 returned error can't find the container with id 98cf4f476aed7115043c57ca4407e61dbb64cc571bdb4c70482676e82dcdd962
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.308391 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-42f68" podStartSLOduration=2.308347879 podStartE2EDuration="2.308347879s" podCreationTimestamp="2025-11-28 16:32:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:32:35.281093897 +0000 UTC m=+1308.672762428" watchObservedRunningTime="2025-11-28 16:32:35.308347879 +0000 UTC m=+1308.700016420"
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.341916 4954 scope.go:117] "RemoveContainer" containerID="b55e0a2a280f6d2929b8103dd3e424f6354d0b938ccc43616f9454a7ff40cbff"
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.342415 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-hcg7c"]
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.364840 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-hcg7c"]
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.382871 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-8b65x"]
Nov 28 16:32:35 crc kubenswrapper[4954]: I1128 16:32:35.883734 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d5f1005-9b04-4e6b-9b73-b7ec83b4a990" path="/var/lib/kubelet/pods/4d5f1005-9b04-4e6b-9b73-b7ec83b4a990/volumes"
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.319242 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-p99dx" event={"ID":"0d145bb5-4e17-4345-b697-a0273aef5f49","Type":"ContainerStarted","Data":"98cf4f476aed7115043c57ca4407e61dbb64cc571bdb4c70482676e82dcdd962"}
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.325068 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-dblvm" event={"ID":"e78f971d-0af3-4789-a73a-b57790eb3dfa","Type":"ContainerStarted","Data":"9c6096808e547acb9a0bfe8d3ba36953837f47d196dc23225e34634802657a07"}
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.325129 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-dblvm" event={"ID":"e78f971d-0af3-4789-a73a-b57790eb3dfa","Type":"ContainerStarted","Data":"7dec7c1deaf936f88a2ffc6a4678ca1a783251b85e4d0131fe27418b1d22d287"}
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.328193 4954 generic.go:334] "Generic (PLEG): container finished" podID="d78cf560-4e31-47ac-a860-90a506397699" containerID="948f8d2d3dfa8fd432e786a7f278885f930c82ce76cdabc35e38c4c28e3d82fc" exitCode=0
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.328271 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-8b65x" event={"ID":"d78cf560-4e31-47ac-a860-90a506397699","Type":"ContainerDied","Data":"948f8d2d3dfa8fd432e786a7f278885f930c82ce76cdabc35e38c4c28e3d82fc"}
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.328298 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-8b65x" event={"ID":"d78cf560-4e31-47ac-a860-90a506397699","Type":"ContainerStarted","Data":"ac7b37fee30b5622230ca9adf4d88f71ddb237c2f24da3d1f5092c95992994a4"}
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.335292 4954 generic.go:334] "Generic (PLEG): container finished" podID="1e50d182-9126-4ef1-a377-8620e0357d4d" containerID="aa31d4b7c23c133e9bd78b541ca96d27bc06d352ed360aed9ac2990e051d421b" exitCode=0
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.335719 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-6kcgd" event={"ID":"1e50d182-9126-4ef1-a377-8620e0357d4d","Type":"ContainerDied","Data":"aa31d4b7c23c133e9bd78b541ca96d27bc06d352ed360aed9ac2990e051d421b"}
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.344181 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-dblvm" podStartSLOduration=3.344161025 podStartE2EDuration="3.344161025s" podCreationTimestamp="2025-11-28 16:32:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:32:36.34305555 +0000 UTC m=+1309.734724091" watchObservedRunningTime="2025-11-28 16:32:36.344161025 +0000 UTC m=+1309.735829566"
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.438925 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.895803 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-d4l6p"
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.990809 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-dns-swift-storage-0\") pod \"7e9472a3-9824-414b-aba8-a5249d70f046\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") "
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.991103 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-config\") pod \"7e9472a3-9824-414b-aba8-a5249d70f046\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") "
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.991242 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-dns-svc\") pod \"7e9472a3-9824-414b-aba8-a5249d70f046\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") "
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.991374 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njtwp\" (UniqueName: \"kubernetes.io/projected/7e9472a3-9824-414b-aba8-a5249d70f046-kube-api-access-njtwp\") pod \"7e9472a3-9824-414b-aba8-a5249d70f046\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") "
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.991413 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-ovsdbserver-sb\") pod \"7e9472a3-9824-414b-aba8-a5249d70f046\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") "
Nov 28 16:32:36 crc kubenswrapper[4954]: I1128 16:32:36.991448 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-ovsdbserver-nb\") pod \"7e9472a3-9824-414b-aba8-a5249d70f046\" (UID: \"7e9472a3-9824-414b-aba8-a5249d70f046\") "
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.010920 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e9472a3-9824-414b-aba8-a5249d70f046-kube-api-access-njtwp" (OuterVolumeSpecName: "kube-api-access-njtwp") pod "7e9472a3-9824-414b-aba8-a5249d70f046" (UID: "7e9472a3-9824-414b-aba8-a5249d70f046"). InnerVolumeSpecName "kube-api-access-njtwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.025085 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7e9472a3-9824-414b-aba8-a5249d70f046" (UID: "7e9472a3-9824-414b-aba8-a5249d70f046"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.025594 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-config" (OuterVolumeSpecName: "config") pod "7e9472a3-9824-414b-aba8-a5249d70f046" (UID: "7e9472a3-9824-414b-aba8-a5249d70f046"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.032439 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7e9472a3-9824-414b-aba8-a5249d70f046" (UID: "7e9472a3-9824-414b-aba8-a5249d70f046"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.033426 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7e9472a3-9824-414b-aba8-a5249d70f046" (UID: "7e9472a3-9824-414b-aba8-a5249d70f046"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.065311 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7e9472a3-9824-414b-aba8-a5249d70f046" (UID: "7e9472a3-9824-414b-aba8-a5249d70f046"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.094163 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.094496 4954 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.094511 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njtwp\" (UniqueName: \"kubernetes.io/projected/7e9472a3-9824-414b-aba8-a5249d70f046-kube-api-access-njtwp\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.094527 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.094556 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.094567 4954 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e9472a3-9824-414b-aba8-a5249d70f046-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.352345 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-8b65x" event={"ID":"d78cf560-4e31-47ac-a860-90a506397699","Type":"ContainerStarted","Data":"fa744681a479abb7c4ce5157461324894bb06c99dd13dc7d997298f892ffd35b"}
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.352475 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.356173 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-d4l6p"
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.356219 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-d4l6p" event={"ID":"7e9472a3-9824-414b-aba8-a5249d70f046","Type":"ContainerDied","Data":"e94d7ecf78a8afea131b3267105c84c35bbd5c1508604e7713c79231333ed01a"}
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.356263 4954 scope.go:117] "RemoveContainer" containerID="eede0650f5565b5a4717ccb03b92efc80517eb486cb6e156c7edf17a383ffdba"
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.381994 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cf78879c9-8b65x" podStartSLOduration=3.381971524 podStartE2EDuration="3.381971524s" podCreationTimestamp="2025-11-28 16:32:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:32:37.370279654 +0000 UTC m=+1310.761948195" watchObservedRunningTime="2025-11-28 16:32:37.381971524 +0000 UTC m=+1310.773640075"
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.444386 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-d4l6p"]
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.452078 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-d4l6p"]
Nov 28 16:32:37 crc kubenswrapper[4954]: I1128 16:32:37.869834 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e9472a3-9824-414b-aba8-a5249d70f046" path="/var/lib/kubelet/pods/7e9472a3-9824-414b-aba8-a5249d70f046/volumes"
Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.283640 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-6kcgd"
Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.377158 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-combined-ca-bundle\") pod \"1e50d182-9126-4ef1-a377-8620e0357d4d\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") "
Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.377224 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-db-sync-config-data\") pod \"1e50d182-9126-4ef1-a377-8620e0357d4d\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") "
Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.377333 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvggz\" (UniqueName: \"kubernetes.io/projected/1e50d182-9126-4ef1-a377-8620e0357d4d-kube-api-access-kvggz\") pod \"1e50d182-9126-4ef1-a377-8620e0357d4d\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") "
Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.377422 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-config-data\") pod \"1e50d182-9126-4ef1-a377-8620e0357d4d\" (UID: \"1e50d182-9126-4ef1-a377-8620e0357d4d\") "
Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.384245 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "1e50d182-9126-4ef1-a377-8620e0357d4d" (UID: "1e50d182-9126-4ef1-a377-8620e0357d4d"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.384494 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e50d182-9126-4ef1-a377-8620e0357d4d-kube-api-access-kvggz" (OuterVolumeSpecName: "kube-api-access-kvggz") pod "1e50d182-9126-4ef1-a377-8620e0357d4d" (UID: "1e50d182-9126-4ef1-a377-8620e0357d4d"). InnerVolumeSpecName "kube-api-access-kvggz".
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.392743 4954 generic.go:334] "Generic (PLEG): container finished" podID="274be6d5-d1d3-4c31-a3d1-d32a173a07c6" containerID="12f438b6bdc2aca45e92761c09020e8d83c9a7918f7f3aa86649cd411ca7a04c" exitCode=0 Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.392831 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-42f68" event={"ID":"274be6d5-d1d3-4c31-a3d1-d32a173a07c6","Type":"ContainerDied","Data":"12f438b6bdc2aca45e92761c09020e8d83c9a7918f7f3aa86649cd411ca7a04c"} Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.394229 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-6kcgd" event={"ID":"1e50d182-9126-4ef1-a377-8620e0357d4d","Type":"ContainerDied","Data":"4670b573a3dab28167da0e8c56eeaf569a64ddbef0b36cfe679a23326c7eb5bf"} Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.394264 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4670b573a3dab28167da0e8c56eeaf569a64ddbef0b36cfe679a23326c7eb5bf" Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.394287 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-6kcgd" Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.414474 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1e50d182-9126-4ef1-a377-8620e0357d4d" (UID: "1e50d182-9126-4ef1-a377-8620e0357d4d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.439096 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-config-data" (OuterVolumeSpecName: "config-data") pod "1e50d182-9126-4ef1-a377-8620e0357d4d" (UID: "1e50d182-9126-4ef1-a377-8620e0357d4d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.479669 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvggz\" (UniqueName: \"kubernetes.io/projected/1e50d182-9126-4ef1-a377-8620e0357d4d-kube-api-access-kvggz\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.479704 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.479716 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:40 crc kubenswrapper[4954]: I1128 16:32:40.479724 4954 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1e50d182-9126-4ef1-a377-8620e0357d4d-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.670293 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-8b65x"] Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.670887 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cf78879c9-8b65x" podUID="d78cf560-4e31-47ac-a860-90a506397699" containerName="dnsmasq-dns" containerID="cri-o://fa744681a479abb7c4ce5157461324894bb06c99dd13dc7d997298f892ffd35b" gracePeriod=10 Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.718830 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-vbxm2"] Nov 28 16:32:41 crc kubenswrapper[4954]: E1128 16:32:41.719273 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e9472a3-9824-414b-aba8-a5249d70f046" containerName="init" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.719285 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e9472a3-9824-414b-aba8-a5249d70f046" containerName="init" Nov 28 16:32:41 crc kubenswrapper[4954]: E1128 16:32:41.719304 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e50d182-9126-4ef1-a377-8620e0357d4d" containerName="glance-db-sync" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.719310 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e50d182-9126-4ef1-a377-8620e0357d4d" containerName="glance-db-sync" Nov 28 16:32:41 crc kubenswrapper[4954]: E1128 16:32:41.719339 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d5f1005-9b04-4e6b-9b73-b7ec83b4a990" containerName="dnsmasq-dns" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.719345 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d5f1005-9b04-4e6b-9b73-b7ec83b4a990" containerName="dnsmasq-dns" Nov 28 16:32:41 crc kubenswrapper[4954]: E1128 16:32:41.719356 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d5f1005-9b04-4e6b-9b73-b7ec83b4a990" containerName="init" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.719362 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d5f1005-9b04-4e6b-9b73-b7ec83b4a990" containerName="init" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.722108 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e9472a3-9824-414b-aba8-a5249d70f046" containerName="init" Nov 
28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.722154 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e50d182-9126-4ef1-a377-8620e0357d4d" containerName="glance-db-sync" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.722169 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d5f1005-9b04-4e6b-9b73-b7ec83b4a990" containerName="dnsmasq-dns" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.726813 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.787051 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-vbxm2"] Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.812195 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.812276 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.812314 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-config\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.812345 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.812371 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nbpv\" (UniqueName: \"kubernetes.io/projected/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-kube-api-access-4nbpv\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.812413 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.914305 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " 
pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.914577 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nbpv\" (UniqueName: \"kubernetes.io/projected/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-kube-api-access-4nbpv\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.914636 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.914731 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.914773 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.914811 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-config\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.915576 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-config\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.915618 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.916485 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.917462 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 
16:32:41.917622 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:41 crc kubenswrapper[4954]: I1128 16:32:41.937609 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nbpv\" (UniqueName: \"kubernetes.io/projected/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-kube-api-access-4nbpv\") pod \"dnsmasq-dns-56df8fb6b7-vbxm2\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.100615 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.423421 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-8b65x" event={"ID":"d78cf560-4e31-47ac-a860-90a506397699","Type":"ContainerDied","Data":"fa744681a479abb7c4ce5157461324894bb06c99dd13dc7d997298f892ffd35b"} Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.423903 4954 generic.go:334] "Generic (PLEG): container finished" podID="d78cf560-4e31-47ac-a860-90a506397699" containerID="fa744681a479abb7c4ce5157461324894bb06c99dd13dc7d997298f892ffd35b" exitCode=0 Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.663232 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.664720 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.666933 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-z8dvv" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.668131 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.671183 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.689411 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.729109 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-scripts\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.729206 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a7151b57-96d5-4837-80c7-84a62098621c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.729239 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7151b57-96d5-4837-80c7-84a62098621c-logs\") pod 
\"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.729311 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-config-data\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.729612 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mcx7\" (UniqueName: \"kubernetes.io/projected/a7151b57-96d5-4837-80c7-84a62098621c-kube-api-access-2mcx7\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.729676 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.729716 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.768063 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.769824 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.772618 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.783041 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.831658 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.831719 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mcx7\" (UniqueName: \"kubernetes.io/projected/a7151b57-96d5-4837-80c7-84a62098621c-kube-api-access-2mcx7\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.831750 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.831778 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.831800 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.831823 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.831842 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-scripts\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.831861 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9ptf\" (UniqueName: \"kubernetes.io/projected/fda3c03a-f00b-4968-823d-28c5be332572-kube-api-access-d9ptf\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " 
pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.831877 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fda3c03a-f00b-4968-823d-28c5be332572-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.831895 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a7151b57-96d5-4837-80c7-84a62098621c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.831920 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7151b57-96d5-4837-80c7-84a62098621c-logs\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.831958 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-config-data\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.832007 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fda3c03a-f00b-4968-823d-28c5be332572-logs\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.832023 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.832652 4954 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.838246 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7151b57-96d5-4837-80c7-84a62098621c-logs\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.838246 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a7151b57-96d5-4837-80c7-84a62098621c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc 
kubenswrapper[4954]: I1128 16:32:42.841390 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-scripts\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.842922 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.844376 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-config-data\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.853225 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mcx7\" (UniqueName: \"kubernetes.io/projected/a7151b57-96d5-4837-80c7-84a62098621c-kube-api-access-2mcx7\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.866774 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") " pod="openstack/glance-default-external-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.934242 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fda3c03a-f00b-4968-823d-28c5be332572-logs\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.934297 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.934351 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.934419 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.934457 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.934487 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9ptf\" (UniqueName: \"kubernetes.io/projected/fda3c03a-f00b-4968-823d-28c5be332572-kube-api-access-d9ptf\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.934510 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fda3c03a-f00b-4968-823d-28c5be332572-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.935229 4954 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.935450 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fda3c03a-f00b-4968-823d-28c5be332572-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.935506 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fda3c03a-f00b-4968-823d-28c5be332572-logs\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.940442 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.940494 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.951743 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.955992 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9ptf\" (UniqueName: \"kubernetes.io/projected/fda3c03a-f00b-4968-823d-28c5be332572-kube-api-access-d9ptf\") pod 
\"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.971934 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:32:42 crc kubenswrapper[4954]: I1128 16:32:42.995018 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.026455 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.041129 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.137813 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vqq2v\" (UniqueName: \"kubernetes.io/projected/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-kube-api-access-vqq2v\") pod \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.137875 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-credential-keys\") pod \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.138023 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-fernet-keys\") pod \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.138087 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-config-data\") pod \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.138154 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-scripts\") pod \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.138188 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-combined-ca-bundle\") pod \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\" (UID: \"274be6d5-d1d3-4c31-a3d1-d32a173a07c6\") " Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.142350 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "274be6d5-d1d3-4c31-a3d1-d32a173a07c6" (UID: "274be6d5-d1d3-4c31-a3d1-d32a173a07c6"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.143090 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-kube-api-access-vqq2v" (OuterVolumeSpecName: "kube-api-access-vqq2v") pod "274be6d5-d1d3-4c31-a3d1-d32a173a07c6" (UID: "274be6d5-d1d3-4c31-a3d1-d32a173a07c6"). InnerVolumeSpecName "kube-api-access-vqq2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.143450 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "274be6d5-d1d3-4c31-a3d1-d32a173a07c6" (UID: "274be6d5-d1d3-4c31-a3d1-d32a173a07c6"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.145425 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-scripts" (OuterVolumeSpecName: "scripts") pod "274be6d5-d1d3-4c31-a3d1-d32a173a07c6" (UID: "274be6d5-d1d3-4c31-a3d1-d32a173a07c6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.165463 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "274be6d5-d1d3-4c31-a3d1-d32a173a07c6" (UID: "274be6d5-d1d3-4c31-a3d1-d32a173a07c6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.173563 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-config-data" (OuterVolumeSpecName: "config-data") pod "274be6d5-d1d3-4c31-a3d1-d32a173a07c6" (UID: "274be6d5-d1d3-4c31-a3d1-d32a173a07c6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.240359 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.240393 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vqq2v\" (UniqueName: \"kubernetes.io/projected/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-kube-api-access-vqq2v\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.240405 4954 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.240415 4954 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.240424 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.240432 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/274be6d5-d1d3-4c31-a3d1-d32a173a07c6-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.435663 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-42f68" event={"ID":"274be6d5-d1d3-4c31-a3d1-d32a173a07c6","Type":"ContainerDied","Data":"d50bc3ab265e65f7510d60674edde377ff7da527b9a43bf5231c2840518b2aff"} Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.435710 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d50bc3ab265e65f7510d60674edde377ff7da527b9a43bf5231c2840518b2aff" Nov 28 16:32:43 crc kubenswrapper[4954]: I1128 16:32:43.435775 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-42f68" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.136035 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-42f68"] Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.144564 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-42f68"] Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.232851 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-s9vhw"] Nov 28 16:32:44 crc kubenswrapper[4954]: E1128 16:32:44.233359 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="274be6d5-d1d3-4c31-a3d1-d32a173a07c6" containerName="keystone-bootstrap" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.233383 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="274be6d5-d1d3-4c31-a3d1-d32a173a07c6" containerName="keystone-bootstrap" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.233644 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="274be6d5-d1d3-4c31-a3d1-d32a173a07c6" containerName="keystone-bootstrap" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.234240 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.237198 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.238081 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.238240 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.238352 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.238503 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-tvqk5" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.241894 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-s9vhw"] Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.257371 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfbx5\" (UniqueName: \"kubernetes.io/projected/597d1db1-e180-4bef-9c2c-d09998c23f2a-kube-api-access-cfbx5\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.257550 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-combined-ca-bundle\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.257626 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-fernet-keys\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: 
I1128 16:32:44.257655 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-config-data\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.257733 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-credential-keys\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.257828 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-scripts\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.359667 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-scripts\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.359879 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfbx5\" (UniqueName: \"kubernetes.io/projected/597d1db1-e180-4bef-9c2c-d09998c23f2a-kube-api-access-cfbx5\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.359947 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-combined-ca-bundle\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.359987 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-fernet-keys\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.360012 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-config-data\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.360051 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-credential-keys\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.366444 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-scripts\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.366452 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-credential-keys\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.366854 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-fernet-keys\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.373995 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-combined-ca-bundle\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.374672 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-config-data\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.379695 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfbx5\" (UniqueName: \"kubernetes.io/projected/597d1db1-e180-4bef-9c2c-d09998c23f2a-kube-api-access-cfbx5\") pod \"keystone-bootstrap-s9vhw\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.552979 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:32:44 crc kubenswrapper[4954]: I1128 16:32:44.650125 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-cf78879c9-8b65x" podUID="d78cf560-4e31-47ac-a860-90a506397699" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.143:5353: connect: connection refused" Nov 28 16:32:45 crc kubenswrapper[4954]: I1128 16:32:45.073984 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:32:45 crc kubenswrapper[4954]: I1128 16:32:45.205105 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:32:45 crc kubenswrapper[4954]: I1128 16:32:45.869855 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="274be6d5-d1d3-4c31-a3d1-d32a173a07c6" path="/var/lib/kubelet/pods/274be6d5-d1d3-4c31-a3d1-d32a173a07c6/volumes" Nov 28 16:32:48 crc kubenswrapper[4954]: E1128 16:32:48.541859 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 28 16:32:48 crc kubenswrapper[4954]: E1128 16:32:48.542565 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tzstd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-sqc29_openstack(c10c6844-a43f-4167-90af-673636f4006b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 16:32:48 crc kubenswrapper[4954]: E1128 16:32:48.543775 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-sqc29" podUID="c10c6844-a43f-4167-90af-673636f4006b" 
Nov 28 16:32:49 crc kubenswrapper[4954]: E1128 16:32:49.493555 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-sqc29" podUID="c10c6844-a43f-4167-90af-673636f4006b"
Nov 28 16:32:54 crc kubenswrapper[4954]: I1128 16:32:54.652519 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-cf78879c9-8b65x" podUID="d78cf560-4e31-47ac-a860-90a506397699" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.143:5353: i/o timeout"
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.270599 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.426029 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-dns-svc\") pod \"d78cf560-4e31-47ac-a860-90a506397699\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") "
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.426101 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-config\") pod \"d78cf560-4e31-47ac-a860-90a506397699\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") "
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.426198 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-dns-swift-storage-0\") pod \"d78cf560-4e31-47ac-a860-90a506397699\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") "
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.426320 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sr8k9\" (UniqueName: \"kubernetes.io/projected/d78cf560-4e31-47ac-a860-90a506397699-kube-api-access-sr8k9\") pod \"d78cf560-4e31-47ac-a860-90a506397699\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") "
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.426435 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-ovsdbserver-nb\") pod \"d78cf560-4e31-47ac-a860-90a506397699\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") "
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.426983 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-ovsdbserver-sb\") pod \"d78cf560-4e31-47ac-a860-90a506397699\" (UID: \"d78cf560-4e31-47ac-a860-90a506397699\") "
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.431671 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d78cf560-4e31-47ac-a860-90a506397699-kube-api-access-sr8k9" (OuterVolumeSpecName: "kube-api-access-sr8k9") pod "d78cf560-4e31-47ac-a860-90a506397699" (UID: "d78cf560-4e31-47ac-a860-90a506397699"). InnerVolumeSpecName "kube-api-access-sr8k9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.474819 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d78cf560-4e31-47ac-a860-90a506397699" (UID: "d78cf560-4e31-47ac-a860-90a506397699"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.478197 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d78cf560-4e31-47ac-a860-90a506397699" (UID: "d78cf560-4e31-47ac-a860-90a506397699"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.482160 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-config" (OuterVolumeSpecName: "config") pod "d78cf560-4e31-47ac-a860-90a506397699" (UID: "d78cf560-4e31-47ac-a860-90a506397699"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.491369 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d78cf560-4e31-47ac-a860-90a506397699" (UID: "d78cf560-4e31-47ac-a860-90a506397699"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.493756 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d78cf560-4e31-47ac-a860-90a506397699" (UID: "d78cf560-4e31-47ac-a860-90a506397699"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.529188 4954 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.529513 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sr8k9\" (UniqueName: \"kubernetes.io/projected/d78cf560-4e31-47ac-a860-90a506397699-kube-api-access-sr8k9\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.529548 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.529562 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.529574 4954 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.529587 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d78cf560-4e31-47ac-a860-90a506397699-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.588461 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-8b65x" event={"ID":"d78cf560-4e31-47ac-a860-90a506397699","Type":"ContainerDied","Data":"ac7b37fee30b5622230ca9adf4d88f71ddb237c2f24da3d1f5092c95992994a4"}
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.588547 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-8b65x"
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.588558 4954 scope.go:117] "RemoveContainer" containerID="fa744681a479abb7c4ce5157461324894bb06c99dd13dc7d997298f892ffd35b"
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.622486 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-8b65x"]
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.631898 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-8b65x"]
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.653900 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-cf78879c9-8b65x" podUID="d78cf560-4e31-47ac-a860-90a506397699" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.143:5353: i/o timeout"
Nov 28 16:32:59 crc kubenswrapper[4954]: I1128 16:32:59.867292 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d78cf560-4e31-47ac-a860-90a506397699" path="/var/lib/kubelet/pods/d78cf560-4e31-47ac-a860-90a506397699/volumes"
Nov 28 16:33:01 crc kubenswrapper[4954]: I1128 16:33:01.223659 4954 scope.go:117] "RemoveContainer" containerID="948f8d2d3dfa8fd432e786a7f278885f930c82ce76cdabc35e38c4c28e3d82fc"
Nov 28 16:33:01 crc kubenswrapper[4954]: E1128 16:33:01.401019 4954 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified"
Nov 28 16:33:01 crc kubenswrapper[4954]: E1128 16:33:01.401461 4954 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7wpc9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-v7lkp_openstack(2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 16:33:01 crc kubenswrapper[4954]: E1128 16:33:01.403010 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-v7lkp" podUID="2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e"
Nov 28 16:33:01 crc kubenswrapper[4954]: I1128 16:33:01.610679 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8d4de09-a431-4eae-9ac8-23a33e1039b4","Type":"ContainerStarted","Data":"4d382ba528f41aee190edd15fc2b523c22449fd2cee1af2ba24ed905aa18370e"}
Nov 28 16:33:01 crc kubenswrapper[4954]: I1128 16:33:01.612923 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-p99dx" event={"ID":"0d145bb5-4e17-4345-b697-a0273aef5f49","Type":"ContainerStarted","Data":"975205d87b97524a1fbe645abb8c4c0762a091a627b07f886282ee646cab8c88"}
Nov 28 16:33:01 crc kubenswrapper[4954]: E1128 16:33:01.616750 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-v7lkp" podUID="2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e"
Nov 28 16:33:01 crc kubenswrapper[4954]: I1128 16:33:01.646285 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-p99dx" podStartSLOduration=1.813409836 podStartE2EDuration="27.646257195s" podCreationTimestamp="2025-11-28 16:32:34 +0000 UTC" firstStartedPulling="2025-11-28 16:32:35.331718397 +0000 UTC m=+1308.723386938" lastFinishedPulling="2025-11-28 16:33:01.164565756 +0000 UTC m=+1334.556234297" observedRunningTime="2025-11-28 16:33:01.633604625 +0000 UTC m=+1335.025273176" watchObservedRunningTime="2025-11-28 16:33:01.646257195 +0000 UTC m=+1335.037925746"
Nov 28 16:33:01 crc kubenswrapper[4954]: W1128 16:33:01.669275 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ea4a7d7_8fb8_4a12_936a_30fbcc5810c4.slice/crio-8cfaab159a84085345682ec6a24b2ad75a95873161b0929d46e08ef6a9819872 WatchSource:0}: Error finding container 8cfaab159a84085345682ec6a24b2ad75a95873161b0929d46e08ef6a9819872: Status 404 returned error can't find the container with id 8cfaab159a84085345682ec6a24b2ad75a95873161b0929d46e08ef6a9819872
Nov 28 16:33:01 crc kubenswrapper[4954]: I1128 16:33:01.686684 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-vbxm2"]
Nov 28 16:33:01 crc kubenswrapper[4954]: I1128 16:33:01.961327 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-s9vhw"]
Nov 28 16:33:01 crc kubenswrapper[4954]: I1128 16:33:01.961936 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 28 16:33:02 crc kubenswrapper[4954]: I1128 16:33:02.054711 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 16:33:02 crc kubenswrapper[4954]: W1128 16:33:02.055781 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7151b57_96d5_4837_80c7_84a62098621c.slice/crio-7bcf3dd126d8ea04960c6e07427533dcdb1b44a49796484c7cecd0515bccd969 WatchSource:0}: Error finding container 7bcf3dd126d8ea04960c6e07427533dcdb1b44a49796484c7cecd0515bccd969: Status 404 returned error can't find the container with id 7bcf3dd126d8ea04960c6e07427533dcdb1b44a49796484c7cecd0515bccd969
Nov 28 16:33:02 crc kubenswrapper[4954]: I1128 16:33:02.480829 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:33:02 crc kubenswrapper[4954]: I1128 16:33:02.481268 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:33:02 crc kubenswrapper[4954]: I1128 16:33:02.629553 4954 generic.go:334] "Generic (PLEG): container finished" podID="9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4" containerID="c8b33144be84f04bad707922112f996de86772fe1a09163aa73a4e2f40466ac8" exitCode=0
Nov 28 16:33:02 crc kubenswrapper[4954]: I1128 16:33:02.629627 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" event={"ID":"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4","Type":"ContainerDied","Data":"c8b33144be84f04bad707922112f996de86772fe1a09163aa73a4e2f40466ac8"}
Nov 28 16:33:02 crc kubenswrapper[4954]: I1128 16:33:02.629700 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" event={"ID":"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4","Type":"ContainerStarted","Data":"8cfaab159a84085345682ec6a24b2ad75a95873161b0929d46e08ef6a9819872"}
Nov 28 16:33:02 crc kubenswrapper[4954]: I1128 16:33:02.659284 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-s9vhw" event={"ID":"597d1db1-e180-4bef-9c2c-d09998c23f2a","Type":"ContainerStarted","Data":"9675109c04b083255a1cea8272db303a0e47b5af1a63b40b8104a6ec81ac39f4"}
Nov 28 16:33:02 crc kubenswrapper[4954]: I1128 16:33:02.659618 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-s9vhw" event={"ID":"597d1db1-e180-4bef-9c2c-d09998c23f2a","Type":"ContainerStarted","Data":"3d1cc3e319b510cab5dcd7e20589fb96b63a53475cffb2a8ab37a1945e9768d3"}
Nov 28 16:33:02 crc kubenswrapper[4954]: I1128 16:33:02.663286 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a7151b57-96d5-4837-80c7-84a62098621c","Type":"ContainerStarted","Data":"d8c547422a6a4edac0ef5037be9e67b3dee41df22344b8c6874df46f3b065368"}
Nov 28 16:33:02 crc kubenswrapper[4954]: I1128 16:33:02.663339 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a7151b57-96d5-4837-80c7-84a62098621c","Type":"ContainerStarted","Data":"7bcf3dd126d8ea04960c6e07427533dcdb1b44a49796484c7cecd0515bccd969"}
Nov 28 16:33:02 crc kubenswrapper[4954]: I1128 16:33:02.729278 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-s9vhw" podStartSLOduration=18.729248301 podStartE2EDuration="18.729248301s" podCreationTimestamp="2025-11-28 16:32:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:02.713854824 +0000 UTC m=+1336.105523365" watchObservedRunningTime="2025-11-28 16:33:02.729248301 +0000 UTC m=+1336.120916842"
Nov 28 16:33:03 crc kubenswrapper[4954]: I1128 16:33:03.135091 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 16:33:03 crc kubenswrapper[4954]: W1128 16:33:03.139564 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfda3c03a_f00b_4968_823d_28c5be332572.slice/crio-cd0ceceae22523164167a22e0f629dd38e38f247d743d8252470d3cd6f6e9e91 WatchSource:0}: Error finding container cd0ceceae22523164167a22e0f629dd38e38f247d743d8252470d3cd6f6e9e91: Status 404 returned error can't find the container with id cd0ceceae22523164167a22e0f629dd38e38f247d743d8252470d3cd6f6e9e91
Nov 28 16:33:03 crc kubenswrapper[4954]: I1128 16:33:03.673552 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a7151b57-96d5-4837-80c7-84a62098621c","Type":"ContainerStarted","Data":"e878ba4c3e6bf5130c0fd004406b54b3fafa3a10d256f3d232d4cd22b47340eb"}
Nov 28 16:33:03 crc kubenswrapper[4954]: I1128 16:33:03.673695 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a7151b57-96d5-4837-80c7-84a62098621c" containerName="glance-log" containerID="cri-o://d8c547422a6a4edac0ef5037be9e67b3dee41df22344b8c6874df46f3b065368" gracePeriod=30
Nov 28 16:33:03 crc kubenswrapper[4954]: I1128 16:33:03.673765 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a7151b57-96d5-4837-80c7-84a62098621c" containerName="glance-httpd" containerID="cri-o://e878ba4c3e6bf5130c0fd004406b54b3fafa3a10d256f3d232d4cd22b47340eb" gracePeriod=30
Nov 28 16:33:03 crc kubenswrapper[4954]: I1128 16:33:03.679629 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" event={"ID":"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4","Type":"ContainerStarted","Data":"4c9e91866b6c7b84ea8f51d5283421aad41d1a73ed9c11f6c611001ab81c73af"}
Nov 28 16:33:03 crc kubenswrapper[4954]: I1128 16:33:03.683837 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fda3c03a-f00b-4968-823d-28c5be332572","Type":"ContainerStarted","Data":"cd0ceceae22523164167a22e0f629dd38e38f247d743d8252470d3cd6f6e9e91"}
Nov 28 16:33:03 crc kubenswrapper[4954]: I1128 16:33:03.700972 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=22.700952582 podStartE2EDuration="22.700952582s" podCreationTimestamp="2025-11-28 16:32:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:03.699995081 +0000 UTC m=+1337.091663652" watchObservedRunningTime="2025-11-28 16:33:03.700952582 +0000 UTC m=+1337.092621123"
Nov 28 16:33:03 crc kubenswrapper[4954]: I1128 16:33:03.719231 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" podStartSLOduration=22.719216499 podStartE2EDuration="22.719216499s" podCreationTimestamp="2025-11-28 16:32:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:03.716901486 +0000 UTC m=+1337.108570017" watchObservedRunningTime="2025-11-28 16:33:03.719216499 +0000 UTC m=+1337.110885040"
Nov 28 16:33:04 crc kubenswrapper[4954]: I1128 16:33:04.705719 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fda3c03a-f00b-4968-823d-28c5be332572","Type":"ContainerStarted","Data":"119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca"}
Nov 28 16:33:04 crc kubenswrapper[4954]: I1128 16:33:04.706341 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fda3c03a-f00b-4968-823d-28c5be332572","Type":"ContainerStarted","Data":"19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842"}
Nov 28 16:33:04 crc kubenswrapper[4954]: I1128 16:33:04.706478 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fda3c03a-f00b-4968-823d-28c5be332572" containerName="glance-log" containerID="cri-o://19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842" gracePeriod=30
Nov 28 16:33:04 crc kubenswrapper[4954]: I1128 16:33:04.707043 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fda3c03a-f00b-4968-823d-28c5be332572" containerName="glance-httpd" containerID="cri-o://119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca" gracePeriod=30
Nov 28 16:33:04 crc kubenswrapper[4954]: I1128 16:33:04.726248 4954 generic.go:334] "Generic (PLEG): container finished" podID="a7151b57-96d5-4837-80c7-84a62098621c" containerID="e878ba4c3e6bf5130c0fd004406b54b3fafa3a10d256f3d232d4cd22b47340eb" exitCode=0
Nov 28 16:33:04 crc kubenswrapper[4954]: I1128 16:33:04.726393 4954 generic.go:334] "Generic (PLEG): container finished" podID="a7151b57-96d5-4837-80c7-84a62098621c" containerID="d8c547422a6a4edac0ef5037be9e67b3dee41df22344b8c6874df46f3b065368" exitCode=143
Nov 28 16:33:04 crc kubenswrapper[4954]: I1128 16:33:04.727447 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a7151b57-96d5-4837-80c7-84a62098621c","Type":"ContainerDied","Data":"e878ba4c3e6bf5130c0fd004406b54b3fafa3a10d256f3d232d4cd22b47340eb"}
Nov 28 16:33:04 crc kubenswrapper[4954]: I1128 16:33:04.727573 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a7151b57-96d5-4837-80c7-84a62098621c","Type":"ContainerDied","Data":"d8c547422a6a4edac0ef5037be9e67b3dee41df22344b8c6874df46f3b065368"}
Nov 28 16:33:04 crc kubenswrapper[4954]: I1128 16:33:04.727980 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2"
Nov 28 16:33:04 crc kubenswrapper[4954]: I1128 16:33:04.739206 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=23.739184294 podStartE2EDuration="23.739184294s" podCreationTimestamp="2025-11-28 16:32:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:04.73174382 +0000 UTC m=+1338.123412361" watchObservedRunningTime="2025-11-28 16:33:04.739184294 +0000 UTC m=+1338.130852835"
Nov 28 16:33:04 crc kubenswrapper[4954]: I1128 16:33:04.998568 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.117870 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-scripts\") pod \"a7151b57-96d5-4837-80c7-84a62098621c\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.117977 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-combined-ca-bundle\") pod \"a7151b57-96d5-4837-80c7-84a62098621c\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.118076 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-config-data\") pod \"a7151b57-96d5-4837-80c7-84a62098621c\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.118142 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2mcx7\" (UniqueName: \"kubernetes.io/projected/a7151b57-96d5-4837-80c7-84a62098621c-kube-api-access-2mcx7\") pod \"a7151b57-96d5-4837-80c7-84a62098621c\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.118165 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a7151b57-96d5-4837-80c7-84a62098621c-httpd-run\") pod \"a7151b57-96d5-4837-80c7-84a62098621c\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.118185 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"a7151b57-96d5-4837-80c7-84a62098621c\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.118201 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7151b57-96d5-4837-80c7-84a62098621c-logs\") pod \"a7151b57-96d5-4837-80c7-84a62098621c\" (UID: \"a7151b57-96d5-4837-80c7-84a62098621c\") "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.119012 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7151b57-96d5-4837-80c7-84a62098621c-logs" (OuterVolumeSpecName: "logs") pod "a7151b57-96d5-4837-80c7-84a62098621c" (UID: "a7151b57-96d5-4837-80c7-84a62098621c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.120974 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7151b57-96d5-4837-80c7-84a62098621c-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a7151b57-96d5-4837-80c7-84a62098621c" (UID: "a7151b57-96d5-4837-80c7-84a62098621c"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.126466 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7151b57-96d5-4837-80c7-84a62098621c-kube-api-access-2mcx7" (OuterVolumeSpecName: "kube-api-access-2mcx7") pod "a7151b57-96d5-4837-80c7-84a62098621c" (UID: "a7151b57-96d5-4837-80c7-84a62098621c"). InnerVolumeSpecName "kube-api-access-2mcx7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.130153 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-scripts" (OuterVolumeSpecName: "scripts") pod "a7151b57-96d5-4837-80c7-84a62098621c" (UID: "a7151b57-96d5-4837-80c7-84a62098621c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.140287 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "a7151b57-96d5-4837-80c7-84a62098621c" (UID: "a7151b57-96d5-4837-80c7-84a62098621c"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.172834 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a7151b57-96d5-4837-80c7-84a62098621c" (UID: "a7151b57-96d5-4837-80c7-84a62098621c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.212909 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-config-data" (OuterVolumeSpecName: "config-data") pod "a7151b57-96d5-4837-80c7-84a62098621c" (UID: "a7151b57-96d5-4837-80c7-84a62098621c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.220023 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.220050 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2mcx7\" (UniqueName: \"kubernetes.io/projected/a7151b57-96d5-4837-80c7-84a62098621c-kube-api-access-2mcx7\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.220062 4954 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a7151b57-96d5-4837-80c7-84a62098621c-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.220094 4954 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.220105 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7151b57-96d5-4837-80c7-84a62098621c-logs\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.220113 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.220122 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7151b57-96d5-4837-80c7-84a62098621c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.244075 4954 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.322170 4954 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.375817 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.525793 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fda3c03a-f00b-4968-823d-28c5be332572-httpd-run\") pod \"fda3c03a-f00b-4968-823d-28c5be332572\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.525851 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"fda3c03a-f00b-4968-823d-28c5be332572\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.525923 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-scripts\") pod \"fda3c03a-f00b-4968-823d-28c5be332572\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.525963 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-config-data\") pod \"fda3c03a-f00b-4968-823d-28c5be332572\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.526003 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9ptf\" (UniqueName: \"kubernetes.io/projected/fda3c03a-f00b-4968-823d-28c5be332572-kube-api-access-d9ptf\") pod \"fda3c03a-f00b-4968-823d-28c5be332572\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.526032 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fda3c03a-f00b-4968-823d-28c5be332572-logs\") pod \"fda3c03a-f00b-4968-823d-28c5be332572\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.526072 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-combined-ca-bundle\") pod \"fda3c03a-f00b-4968-823d-28c5be332572\" (UID: \"fda3c03a-f00b-4968-823d-28c5be332572\") "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.527239 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fda3c03a-f00b-4968-823d-28c5be332572-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "fda3c03a-f00b-4968-823d-28c5be332572" (UID: "fda3c03a-f00b-4968-823d-28c5be332572"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.527621 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fda3c03a-f00b-4968-823d-28c5be332572-logs" (OuterVolumeSpecName: "logs") pod "fda3c03a-f00b-4968-823d-28c5be332572" (UID: "fda3c03a-f00b-4968-823d-28c5be332572"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.533156 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda3c03a-f00b-4968-823d-28c5be332572-kube-api-access-d9ptf" (OuterVolumeSpecName: "kube-api-access-d9ptf") pod "fda3c03a-f00b-4968-823d-28c5be332572" (UID: "fda3c03a-f00b-4968-823d-28c5be332572"). InnerVolumeSpecName "kube-api-access-d9ptf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.534423 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "fda3c03a-f00b-4968-823d-28c5be332572" (UID: "fda3c03a-f00b-4968-823d-28c5be332572"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.536640 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-scripts" (OuterVolumeSpecName: "scripts") pod "fda3c03a-f00b-4968-823d-28c5be332572" (UID: "fda3c03a-f00b-4968-823d-28c5be332572"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.563148 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fda3c03a-f00b-4968-823d-28c5be332572" (UID: "fda3c03a-f00b-4968-823d-28c5be332572"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.588997 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-config-data" (OuterVolumeSpecName: "config-data") pod "fda3c03a-f00b-4968-823d-28c5be332572" (UID: "fda3c03a-f00b-4968-823d-28c5be332572"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.628510 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9ptf\" (UniqueName: \"kubernetes.io/projected/fda3c03a-f00b-4968-823d-28c5be332572-kube-api-access-d9ptf\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.628577 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fda3c03a-f00b-4968-823d-28c5be332572-logs\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.628591 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.628600 4954 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fda3c03a-f00b-4968-823d-28c5be332572-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.628628 4954 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" "
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.628636 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.628646 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fda3c03a-f00b-4968-823d-28c5be332572-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.652766 4954 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.730543 4954 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.738186 4954 generic.go:334] "Generic (PLEG): container finished" podID="597d1db1-e180-4bef-9c2c-d09998c23f2a" containerID="9675109c04b083255a1cea8272db303a0e47b5af1a63b40b8104a6ec81ac39f4" exitCode=0
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.738268 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-s9vhw" event={"ID":"597d1db1-e180-4bef-9c2c-d09998c23f2a","Type":"ContainerDied","Data":"9675109c04b083255a1cea8272db303a0e47b5af1a63b40b8104a6ec81ac39f4"}
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.742851 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sqc29" event={"ID":"c10c6844-a43f-4167-90af-673636f4006b","Type":"ContainerStarted","Data":"d5248dbf6c49a21e5ab30bb4fdda938ceedcde3b2dae112a6d10da5d1ac67592"}
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.744778 4954 generic.go:334] "Generic (PLEG): container finished" podID="fda3c03a-f00b-4968-823d-28c5be332572" containerID="119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca" exitCode=0
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.744810 4954 generic.go:334] "Generic (PLEG): container finished" podID="fda3c03a-f00b-4968-823d-28c5be332572" containerID="19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842" exitCode=143
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.744832 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.744847 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fda3c03a-f00b-4968-823d-28c5be332572","Type":"ContainerDied","Data":"119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca"}
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.744870 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fda3c03a-f00b-4968-823d-28c5be332572","Type":"ContainerDied","Data":"19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842"}
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.744879 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fda3c03a-f00b-4968-823d-28c5be332572","Type":"ContainerDied","Data":"cd0ceceae22523164167a22e0f629dd38e38f247d743d8252470d3cd6f6e9e91"}
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.744895 4954 scope.go:117] "RemoveContainer" containerID="119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.748179 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8d4de09-a431-4eae-9ac8-23a33e1039b4","Type":"ContainerStarted","Data":"dd316bf065b5e906aadfff02f3ab7f5bc653417348b6d60e4628ece70811cf63"}
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.753053 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.753752 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a7151b57-96d5-4837-80c7-84a62098621c","Type":"ContainerDied","Data":"7bcf3dd126d8ea04960c6e07427533dcdb1b44a49796484c7cecd0515bccd969"}
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.776014 4954 scope.go:117] "RemoveContainer" containerID="19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.789916 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-sqc29" podStartSLOduration=2.431249472 podStartE2EDuration="32.789895362s" podCreationTimestamp="2025-11-28 16:32:33 +0000 UTC" firstStartedPulling="2025-11-28 16:32:35.124239731 +0000 UTC m=+1308.515908262" lastFinishedPulling="2025-11-28 16:33:05.482885611 +0000 UTC m=+1338.874554152" observedRunningTime="2025-11-28 16:33:05.781211727 +0000 UTC m=+1339.172880268" watchObservedRunningTime="2025-11-28 16:33:05.789895362 +0000 UTC m=+1339.181563903"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.846040 4954 scope.go:117] "RemoveContainer" containerID="119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca"
Nov 28 16:33:05 crc kubenswrapper[4954]: E1128 16:33:05.846594 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca\": container with ID starting with 119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca not found: ID does not exist" containerID="119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.846640 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca"} err="failed to get container status \"119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca\": rpc error: code = NotFound desc = could not find container \"119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca\": container with ID starting with 119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca not found: ID does not exist"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.846668 4954 scope.go:117] "RemoveContainer" containerID="19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842"
Nov 28 16:33:05 crc kubenswrapper[4954]: E1128 16:33:05.846991 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842\": container with ID starting with 19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842 not found: ID does not exist" containerID="19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.847025 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842"} err="failed to get container status \"19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842\": rpc error: code = NotFound desc = could not find container \"19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842\": container with ID starting with 19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842 not found: ID does not exist"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.847054 4954 scope.go:117] "RemoveContainer" containerID="119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.847358 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca"} err="failed to get container status \"119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca\": rpc error: code = NotFound desc = could not find container \"119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca\": container with ID starting with 119040fc302878a45962a7ed4c4f600b3eaa8806a36dc184627522d04de28dca not found: ID does not exist"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.847403 4954 scope.go:117] "RemoveContainer" containerID="19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.847691 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842"} err="failed to get container status \"19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842\": rpc error: code = NotFound desc = could not find container \"19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842\": container with ID starting with 19e1dc7130b43f955edb31ebf5c4981688cafd0483f9e55db78fe2633aea6842 not found: ID does not exist"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.847726 4954 scope.go:117] "RemoveContainer" containerID="e878ba4c3e6bf5130c0fd004406b54b3fafa3a10d256f3d232d4cd22b47340eb"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.874135 4954 scope.go:117] "RemoveContainer" containerID="d8c547422a6a4edac0ef5037be9e67b3dee41df22344b8c6874df46f3b065368"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.960378 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.978421 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.988701 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 16:33:05 crc kubenswrapper[4954]: E1128 16:33:05.989176 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d78cf560-4e31-47ac-a860-90a506397699" containerName="init"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.989192 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d78cf560-4e31-47ac-a860-90a506397699" containerName="init"
Nov 28 16:33:05 crc kubenswrapper[4954]: E1128 16:33:05.989221 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d78cf560-4e31-47ac-a860-90a506397699" containerName="dnsmasq-dns"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.989231 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d78cf560-4e31-47ac-a860-90a506397699" containerName="dnsmasq-dns"
Nov 28 16:33:05 crc kubenswrapper[4954]: E1128 16:33:05.989247 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fda3c03a-f00b-4968-823d-28c5be332572" containerName="glance-httpd"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.989255 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="fda3c03a-f00b-4968-823d-28c5be332572" containerName="glance-httpd"
Nov 28 16:33:05 crc kubenswrapper[4954]: E1128 16:33:05.989274 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fda3c03a-f00b-4968-823d-28c5be332572" containerName="glance-log"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.989281 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="fda3c03a-f00b-4968-823d-28c5be332572" containerName="glance-log"
Nov 28 16:33:05 crc kubenswrapper[4954]: E1128 16:33:05.989301 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7151b57-96d5-4837-80c7-84a62098621c" containerName="glance-log"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.989309 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7151b57-96d5-4837-80c7-84a62098621c" containerName="glance-log"
Nov 28 16:33:05 crc kubenswrapper[4954]: E1128 16:33:05.989317 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7151b57-96d5-4837-80c7-84a62098621c" containerName="glance-httpd"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.989323 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7151b57-96d5-4837-80c7-84a62098621c" containerName="glance-httpd"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.989593 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="fda3c03a-f00b-4968-823d-28c5be332572" containerName="glance-httpd"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.989616 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7151b57-96d5-4837-80c7-84a62098621c" containerName="glance-log"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.989627 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7151b57-96d5-4837-80c7-84a62098621c" containerName="glance-httpd"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.989643 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="fda3c03a-f00b-4968-823d-28c5be332572" containerName="glance-log"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.989762 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="d78cf560-4e31-47ac-a860-90a506397699" containerName="dnsmasq-dns"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.990966 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.995666 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.995807 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.995941 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-z8dvv"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.995989 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Nov 28 16:33:05 crc kubenswrapper[4954]: I1128 16:33:05.997719 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.003748 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.014483 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.021658 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.023075 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.025315 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.025742 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.038229 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155203 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155254 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155340 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155379 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155403 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155451 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155484 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-scripts\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155512 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3fb8465-a03c-4461-b334-42b1b20134c9-logs\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155552 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3fb8465-a03c-4461-b334-42b1b20134c9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155575 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jshh2\" (UniqueName: \"kubernetes.io/projected/c3fb8465-a03c-4461-b334-42b1b20134c9-kube-api-access-jshh2\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155674 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcrwd\" (UniqueName: \"kubernetes.io/projected/64c2cd67-7241-451a-8002-2cf34bfccd66-kube-api-access-xcrwd\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155700 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64c2cd67-7241-451a-8002-2cf34bfccd66-logs\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155721 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155749 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/64c2cd67-7241-451a-8002-2cf34bfccd66-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155772 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.155799 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-config-data\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.257465 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcrwd\" (UniqueName: \"kubernetes.io/projected/64c2cd67-7241-451a-8002-2cf34bfccd66-kube-api-access-xcrwd\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.257685 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64c2cd67-7241-451a-8002-2cf34bfccd66-logs\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.257767 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.257804 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/64c2cd67-7241-451a-8002-2cf34bfccd66-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.257833 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0"
Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.257884 4954 reconciler_common.go:218] "operationExecutor.MountVolume
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-config-data\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.257988 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.258022 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64c2cd67-7241-451a-8002-2cf34bfccd66-logs\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.258065 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.258207 4954 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.258242 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.258309 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.258351 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.258391 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.258428 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-scripts\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.258457 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/64c2cd67-7241-451a-8002-2cf34bfccd66-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.258467 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3fb8465-a03c-4461-b334-42b1b20134c9-logs\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.258555 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3fb8465-a03c-4461-b334-42b1b20134c9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.258588 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jshh2\" (UniqueName: \"kubernetes.io/projected/c3fb8465-a03c-4461-b334-42b1b20134c9-kube-api-access-jshh2\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.262363 4954 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.262416 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3fb8465-a03c-4461-b334-42b1b20134c9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.263370 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3fb8465-a03c-4461-b334-42b1b20134c9-logs\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.265207 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.265538 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" 
(UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.265829 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-config-data\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.267173 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.267922 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.274228 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-scripts\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.275437 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.276597 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.277592 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcrwd\" (UniqueName: \"kubernetes.io/projected/64c2cd67-7241-451a-8002-2cf34bfccd66-kube-api-access-xcrwd\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.278275 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jshh2\" (UniqueName: \"kubernetes.io/projected/c3fb8465-a03c-4461-b334-42b1b20134c9-kube-api-access-jshh2\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.297692 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " pod="openstack/glance-default-external-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 
16:33:06.309748 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.312193 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.352155 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.769994 4954 generic.go:334] "Generic (PLEG): container finished" podID="0d145bb5-4e17-4345-b697-a0273aef5f49" containerID="975205d87b97524a1fbe645abb8c4c0762a091a627b07f886282ee646cab8c88" exitCode=0 Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.770078 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-p99dx" event={"ID":"0d145bb5-4e17-4345-b697-a0273aef5f49","Type":"ContainerDied","Data":"975205d87b97524a1fbe645abb8c4c0762a091a627b07f886282ee646cab8c88"} Nov 28 16:33:06 crc kubenswrapper[4954]: I1128 16:33:06.911182 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:33:06 crc kubenswrapper[4954]: W1128 16:33:06.922223 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod64c2cd67_7241_451a_8002_2cf34bfccd66.slice/crio-f8d827ca9184bf882b0276fa7d72b76fb897653bc597b276fbbe7e60f3e48fa6 WatchSource:0}: Error finding container f8d827ca9184bf882b0276fa7d72b76fb897653bc597b276fbbe7e60f3e48fa6: Status 404 returned error can't find the container with id f8d827ca9184bf882b0276fa7d72b76fb897653bc597b276fbbe7e60f3e48fa6 Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.033943 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:33:07 crc kubenswrapper[4954]: W1128 16:33:07.078293 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc3fb8465_a03c_4461_b334_42b1b20134c9.slice/crio-46d540bab171bf5d9bc29b6f3bf2acd912c6fdb5ed6f7721aba6603fe9f2b43d WatchSource:0}: Error finding container 46d540bab171bf5d9bc29b6f3bf2acd912c6fdb5ed6f7721aba6603fe9f2b43d: Status 404 returned error can't find the container with id 46d540bab171bf5d9bc29b6f3bf2acd912c6fdb5ed6f7721aba6603fe9f2b43d Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.104062 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.181983 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-cjcq7"] Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.182358 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" podUID="d3e62442-5977-4a34-b774-d11724b64832" containerName="dnsmasq-dns" containerID="cri-o://584f51f065d5f968fd5955d046bd3591bdfd10e68862055befedb3dd8bcc7b00" gracePeriod=10 Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.373811 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.487957 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbx5\" (UniqueName: \"kubernetes.io/projected/597d1db1-e180-4bef-9c2c-d09998c23f2a-kube-api-access-cfbx5\") pod \"597d1db1-e180-4bef-9c2c-d09998c23f2a\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.488026 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-fernet-keys\") pod \"597d1db1-e180-4bef-9c2c-d09998c23f2a\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.488189 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-scripts\") pod \"597d1db1-e180-4bef-9c2c-d09998c23f2a\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.488210 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-combined-ca-bundle\") pod \"597d1db1-e180-4bef-9c2c-d09998c23f2a\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.488259 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-credential-keys\") pod \"597d1db1-e180-4bef-9c2c-d09998c23f2a\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.488288 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-config-data\") pod \"597d1db1-e180-4bef-9c2c-d09998c23f2a\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.506732 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-scripts" (OuterVolumeSpecName: "scripts") pod "597d1db1-e180-4bef-9c2c-d09998c23f2a" (UID: "597d1db1-e180-4bef-9c2c-d09998c23f2a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.513265 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "597d1db1-e180-4bef-9c2c-d09998c23f2a" (UID: "597d1db1-e180-4bef-9c2c-d09998c23f2a"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.525991 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/597d1db1-e180-4bef-9c2c-d09998c23f2a-kube-api-access-cfbx5" (OuterVolumeSpecName: "kube-api-access-cfbx5") pod "597d1db1-e180-4bef-9c2c-d09998c23f2a" (UID: "597d1db1-e180-4bef-9c2c-d09998c23f2a"). InnerVolumeSpecName "kube-api-access-cfbx5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:07 crc kubenswrapper[4954]: E1128 16:33:07.526024 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-combined-ca-bundle podName:597d1db1-e180-4bef-9c2c-d09998c23f2a nodeName:}" failed. No retries permitted until 2025-11-28 16:33:08.025996793 +0000 UTC m=+1341.417665334 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-combined-ca-bundle") pod "597d1db1-e180-4bef-9c2c-d09998c23f2a" (UID: "597d1db1-e180-4bef-9c2c-d09998c23f2a") : error deleting /var/lib/kubelet/pods/597d1db1-e180-4bef-9c2c-d09998c23f2a/volume-subpaths: remove /var/lib/kubelet/pods/597d1db1-e180-4bef-9c2c-d09998c23f2a/volume-subpaths: no such file or directory Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.530691 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-config-data" (OuterVolumeSpecName: "config-data") pod "597d1db1-e180-4bef-9c2c-d09998c23f2a" (UID: "597d1db1-e180-4bef-9c2c-d09998c23f2a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.535794 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "597d1db1-e180-4bef-9c2c-d09998c23f2a" (UID: "597d1db1-e180-4bef-9c2c-d09998c23f2a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.590808 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbx5\" (UniqueName: \"kubernetes.io/projected/597d1db1-e180-4bef-9c2c-d09998c23f2a-kube-api-access-cfbx5\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.590863 4954 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.590874 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.590882 4954 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.590891 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.685288 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.797340 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-ovsdbserver-nb\") pod \"d3e62442-5977-4a34-b774-d11724b64832\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.798148 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-config\") pod \"d3e62442-5977-4a34-b774-d11724b64832\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.798461 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-ovsdbserver-sb\") pod \"d3e62442-5977-4a34-b774-d11724b64832\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.798614 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9j2pk\" (UniqueName: \"kubernetes.io/projected/d3e62442-5977-4a34-b774-d11724b64832-kube-api-access-9j2pk\") pod \"d3e62442-5977-4a34-b774-d11724b64832\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.798953 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-dns-svc\") pod \"d3e62442-5977-4a34-b774-d11724b64832\" (UID: \"d3e62442-5977-4a34-b774-d11724b64832\") " Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.807141 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3fb8465-a03c-4461-b334-42b1b20134c9","Type":"ContainerStarted","Data":"46d540bab171bf5d9bc29b6f3bf2acd912c6fdb5ed6f7721aba6603fe9f2b43d"} Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.808639 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-s9vhw" event={"ID":"597d1db1-e180-4bef-9c2c-d09998c23f2a","Type":"ContainerDied","Data":"3d1cc3e319b510cab5dcd7e20589fb96b63a53475cffb2a8ab37a1945e9768d3"} Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.808726 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d1cc3e319b510cab5dcd7e20589fb96b63a53475cffb2a8ab37a1945e9768d3" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.808814 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-s9vhw" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.810802 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"64c2cd67-7241-451a-8002-2cf34bfccd66","Type":"ContainerStarted","Data":"f8d827ca9184bf882b0276fa7d72b76fb897653bc597b276fbbe7e60f3e48fa6"} Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.812048 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3e62442-5977-4a34-b774-d11724b64832-kube-api-access-9j2pk" (OuterVolumeSpecName: "kube-api-access-9j2pk") pod "d3e62442-5977-4a34-b774-d11724b64832" (UID: "d3e62442-5977-4a34-b774-d11724b64832"). 
InnerVolumeSpecName "kube-api-access-9j2pk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.813793 4954 generic.go:334] "Generic (PLEG): container finished" podID="d3e62442-5977-4a34-b774-d11724b64832" containerID="584f51f065d5f968fd5955d046bd3591bdfd10e68862055befedb3dd8bcc7b00" exitCode=0 Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.814070 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.814956 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" event={"ID":"d3e62442-5977-4a34-b774-d11724b64832","Type":"ContainerDied","Data":"584f51f065d5f968fd5955d046bd3591bdfd10e68862055befedb3dd8bcc7b00"} Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.815061 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-cjcq7" event={"ID":"d3e62442-5977-4a34-b774-d11724b64832","Type":"ContainerDied","Data":"da42f3af8f9d562458d0bbdf43ef602d6a3f4a3a750f534cc4baa2a95753f550"} Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.815177 4954 scope.go:117] "RemoveContainer" containerID="584f51f065d5f968fd5955d046bd3591bdfd10e68862055befedb3dd8bcc7b00" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.875377 4954 scope.go:117] "RemoveContainer" containerID="4482a020a36152e953c4e94288151282e5536e09ea3c9c692110129abdd1d7c7" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.909409 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7151b57-96d5-4837-80c7-84a62098621c" path="/var/lib/kubelet/pods/a7151b57-96d5-4837-80c7-84a62098621c/volumes" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.911565 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda3c03a-f00b-4968-823d-28c5be332572" path="/var/lib/kubelet/pods/fda3c03a-f00b-4968-823d-28c5be332572/volumes" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.912356 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9j2pk\" (UniqueName: \"kubernetes.io/projected/d3e62442-5977-4a34-b774-d11724b64832-kube-api-access-9j2pk\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.938357 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-5767fbd5b4-jmjzp"] Nov 28 16:33:07 crc kubenswrapper[4954]: E1128 16:33:07.938823 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3e62442-5977-4a34-b774-d11724b64832" containerName="init" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.938839 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3e62442-5977-4a34-b774-d11724b64832" containerName="init" Nov 28 16:33:07 crc kubenswrapper[4954]: E1128 16:33:07.938849 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3e62442-5977-4a34-b774-d11724b64832" containerName="dnsmasq-dns" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.938857 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3e62442-5977-4a34-b774-d11724b64832" containerName="dnsmasq-dns" Nov 28 16:33:07 crc kubenswrapper[4954]: E1128 16:33:07.938871 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="597d1db1-e180-4bef-9c2c-d09998c23f2a" containerName="keystone-bootstrap" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.938879 4954 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="597d1db1-e180-4bef-9c2c-d09998c23f2a" containerName="keystone-bootstrap" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.939134 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3e62442-5977-4a34-b774-d11724b64832" containerName="dnsmasq-dns" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.939151 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="597d1db1-e180-4bef-9c2c-d09998c23f2a" containerName="keystone-bootstrap" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.939772 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5767fbd5b4-jmjzp"] Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.939858 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.949312 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.949601 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.979209 4954 scope.go:117] "RemoveContainer" containerID="584f51f065d5f968fd5955d046bd3591bdfd10e68862055befedb3dd8bcc7b00" Nov 28 16:33:07 crc kubenswrapper[4954]: E1128 16:33:07.979853 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"584f51f065d5f968fd5955d046bd3591bdfd10e68862055befedb3dd8bcc7b00\": container with ID starting with 584f51f065d5f968fd5955d046bd3591bdfd10e68862055befedb3dd8bcc7b00 not found: ID does not exist" containerID="584f51f065d5f968fd5955d046bd3591bdfd10e68862055befedb3dd8bcc7b00" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.979892 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"584f51f065d5f968fd5955d046bd3591bdfd10e68862055befedb3dd8bcc7b00"} err="failed to get container status \"584f51f065d5f968fd5955d046bd3591bdfd10e68862055befedb3dd8bcc7b00\": rpc error: code = NotFound desc = could not find container \"584f51f065d5f968fd5955d046bd3591bdfd10e68862055befedb3dd8bcc7b00\": container with ID starting with 584f51f065d5f968fd5955d046bd3591bdfd10e68862055befedb3dd8bcc7b00 not found: ID does not exist" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.979920 4954 scope.go:117] "RemoveContainer" containerID="4482a020a36152e953c4e94288151282e5536e09ea3c9c692110129abdd1d7c7" Nov 28 16:33:07 crc kubenswrapper[4954]: E1128 16:33:07.980190 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4482a020a36152e953c4e94288151282e5536e09ea3c9c692110129abdd1d7c7\": container with ID starting with 4482a020a36152e953c4e94288151282e5536e09ea3c9c692110129abdd1d7c7 not found: ID does not exist" containerID="4482a020a36152e953c4e94288151282e5536e09ea3c9c692110129abdd1d7c7" Nov 28 16:33:07 crc kubenswrapper[4954]: I1128 16:33:07.980300 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4482a020a36152e953c4e94288151282e5536e09ea3c9c692110129abdd1d7c7"} err="failed to get container status \"4482a020a36152e953c4e94288151282e5536e09ea3c9c692110129abdd1d7c7\": rpc error: code = NotFound desc = could not find container \"4482a020a36152e953c4e94288151282e5536e09ea3c9c692110129abdd1d7c7\": container with ID starting with 
4482a020a36152e953c4e94288151282e5536e09ea3c9c692110129abdd1d7c7 not found: ID does not exist" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.077981 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d3e62442-5977-4a34-b774-d11724b64832" (UID: "d3e62442-5977-4a34-b774-d11724b64832"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.091137 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d3e62442-5977-4a34-b774-d11724b64832" (UID: "d3e62442-5977-4a34-b774-d11724b64832"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.093178 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-config" (OuterVolumeSpecName: "config") pod "d3e62442-5977-4a34-b774-d11724b64832" (UID: "d3e62442-5977-4a34-b774-d11724b64832"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.110376 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d3e62442-5977-4a34-b774-d11724b64832" (UID: "d3e62442-5977-4a34-b774-d11724b64832"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.118408 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-combined-ca-bundle\") pod \"597d1db1-e180-4bef-9c2c-d09998c23f2a\" (UID: \"597d1db1-e180-4bef-9c2c-d09998c23f2a\") " Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.119694 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-public-tls-certs\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.119861 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-internal-tls-certs\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.119924 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-credential-keys\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.119947 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-config-data\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.120961 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbjpb\" (UniqueName: \"kubernetes.io/projected/caa7b547-96ec-4119-87b1-fa14697ba9d1-kube-api-access-kbjpb\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.121084 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-combined-ca-bundle\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.121233 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-fernet-keys\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.121317 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-scripts\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.121431 4954 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.121505 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.121600 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.121972 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d3e62442-5977-4a34-b774-d11724b64832-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.124674 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "597d1db1-e180-4bef-9c2c-d09998c23f2a" (UID: "597d1db1-e180-4bef-9c2c-d09998c23f2a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.154754 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-p99dx" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.157662 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-cjcq7"] Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.177192 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-cjcq7"] Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.227510 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbjpb\" (UniqueName: \"kubernetes.io/projected/caa7b547-96ec-4119-87b1-fa14697ba9d1-kube-api-access-kbjpb\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.227586 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-combined-ca-bundle\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.227826 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-fernet-keys\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.227851 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-scripts\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.227892 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-public-tls-certs\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.228328 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-internal-tls-certs\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.228402 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-credential-keys\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.228424 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-config-data\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.228508 4954 reconciler_common.go:293] "Volume detached 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/597d1db1-e180-4bef-9c2c-d09998c23f2a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.235065 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-credential-keys\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.235693 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-scripts\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.235912 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-combined-ca-bundle\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.235978 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-internal-tls-certs\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.237738 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-fernet-keys\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.237965 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-public-tls-certs\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.246011 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-config-data\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.247758 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbjpb\" (UniqueName: \"kubernetes.io/projected/caa7b547-96ec-4119-87b1-fa14697ba9d1-kube-api-access-kbjpb\") pod \"keystone-5767fbd5b4-jmjzp\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.267819 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.331836 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-config-data\") pod \"0d145bb5-4e17-4345-b697-a0273aef5f49\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.331966 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d145bb5-4e17-4345-b697-a0273aef5f49-logs\") pod \"0d145bb5-4e17-4345-b697-a0273aef5f49\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.331995 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4rdw\" (UniqueName: \"kubernetes.io/projected/0d145bb5-4e17-4345-b697-a0273aef5f49-kube-api-access-r4rdw\") pod \"0d145bb5-4e17-4345-b697-a0273aef5f49\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.332080 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-combined-ca-bundle\") pod \"0d145bb5-4e17-4345-b697-a0273aef5f49\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.332215 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-scripts\") pod \"0d145bb5-4e17-4345-b697-a0273aef5f49\" (UID: \"0d145bb5-4e17-4345-b697-a0273aef5f49\") " Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.332656 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d145bb5-4e17-4345-b697-a0273aef5f49-logs" (OuterVolumeSpecName: "logs") pod "0d145bb5-4e17-4345-b697-a0273aef5f49" (UID: "0d145bb5-4e17-4345-b697-a0273aef5f49"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.333388 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d145bb5-4e17-4345-b697-a0273aef5f49-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.338484 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-scripts" (OuterVolumeSpecName: "scripts") pod "0d145bb5-4e17-4345-b697-a0273aef5f49" (UID: "0d145bb5-4e17-4345-b697-a0273aef5f49"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.341189 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d145bb5-4e17-4345-b697-a0273aef5f49-kube-api-access-r4rdw" (OuterVolumeSpecName: "kube-api-access-r4rdw") pod "0d145bb5-4e17-4345-b697-a0273aef5f49" (UID: "0d145bb5-4e17-4345-b697-a0273aef5f49"). InnerVolumeSpecName "kube-api-access-r4rdw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.369782 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-config-data" (OuterVolumeSpecName: "config-data") pod "0d145bb5-4e17-4345-b697-a0273aef5f49" (UID: "0d145bb5-4e17-4345-b697-a0273aef5f49"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.377296 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d145bb5-4e17-4345-b697-a0273aef5f49" (UID: "0d145bb5-4e17-4345-b697-a0273aef5f49"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.434676 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.434703 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4rdw\" (UniqueName: \"kubernetes.io/projected/0d145bb5-4e17-4345-b697-a0273aef5f49-kube-api-access-r4rdw\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.434716 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.434728 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d145bb5-4e17-4345-b697-a0273aef5f49-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.808952 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5767fbd5b4-jmjzp"] Nov 28 16:33:08 crc kubenswrapper[4954]: W1128 16:33:08.821778 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcaa7b547_96ec_4119_87b1_fa14697ba9d1.slice/crio-fe383ba7337414ab89ed6f97c783d772b2280604308842589f610caf6302960b WatchSource:0}: Error finding container fe383ba7337414ab89ed6f97c783d772b2280604308842589f610caf6302960b: Status 404 returned error can't find the container with id fe383ba7337414ab89ed6f97c783d772b2280604308842589f610caf6302960b Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.834126 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"64c2cd67-7241-451a-8002-2cf34bfccd66","Type":"ContainerStarted","Data":"0591103a92fe55b5aeaf7003da7124790b8840238c9ef95b8d6759987d2cbc7d"} Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.836962 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-p99dx" event={"ID":"0d145bb5-4e17-4345-b697-a0273aef5f49","Type":"ContainerDied","Data":"98cf4f476aed7115043c57ca4407e61dbb64cc571bdb4c70482676e82dcdd962"} Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.836989 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-p99dx" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.836998 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98cf4f476aed7115043c57ca4407e61dbb64cc571bdb4c70482676e82dcdd962" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.842470 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3fb8465-a03c-4461-b334-42b1b20134c9","Type":"ContainerStarted","Data":"ae66ea5bf4f0a053883318ea718224085c3f36d52e10c3bd4f3a097c2bdd7db0"} Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.963391 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-89b6465dd-zsqfd"] Nov 28 16:33:08 crc kubenswrapper[4954]: E1128 16:33:08.963889 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d145bb5-4e17-4345-b697-a0273aef5f49" containerName="placement-db-sync" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.963914 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d145bb5-4e17-4345-b697-a0273aef5f49" containerName="placement-db-sync" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.964159 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d145bb5-4e17-4345-b697-a0273aef5f49" containerName="placement-db-sync" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.965309 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.969119 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-hdtbn" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.969219 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.969357 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.969508 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.969651 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 28 16:33:08 crc kubenswrapper[4954]: I1128 16:33:08.971983 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-89b6465dd-zsqfd"] Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.153599 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-config-data\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.153919 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kt64v\" (UniqueName: \"kubernetes.io/projected/78bd4de4-5601-4771-b15c-c240e097519b-kube-api-access-kt64v\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.153954 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/78bd4de4-5601-4771-b15c-c240e097519b-logs\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.153969 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-scripts\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.154126 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-internal-tls-certs\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.154226 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-combined-ca-bundle\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.154300 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-public-tls-certs\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.255769 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-combined-ca-bundle\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.255830 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-public-tls-certs\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.255910 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-config-data\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.255933 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kt64v\" (UniqueName: \"kubernetes.io/projected/78bd4de4-5601-4771-b15c-c240e097519b-kube-api-access-kt64v\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.255972 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/78bd4de4-5601-4771-b15c-c240e097519b-logs\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.255994 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-scripts\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.256068 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-internal-tls-certs\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.256791 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/78bd4de4-5601-4771-b15c-c240e097519b-logs\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.267943 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-config-data\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.269981 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-scripts\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.270505 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-internal-tls-certs\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.271110 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-public-tls-certs\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.283848 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-combined-ca-bundle\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.316725 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kt64v\" (UniqueName: \"kubernetes.io/projected/78bd4de4-5601-4771-b15c-c240e097519b-kube-api-access-kt64v\") pod \"placement-89b6465dd-zsqfd\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " pod="openstack/placement-89b6465dd-zsqfd" 
Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.602904 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.871931 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3e62442-5977-4a34-b774-d11724b64832" path="/var/lib/kubelet/pods/d3e62442-5977-4a34-b774-d11724b64832/volumes" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.873194 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3fb8465-a03c-4461-b334-42b1b20134c9","Type":"ContainerStarted","Data":"d8e42a4506f0655575814f190360ca846d0c0bd0208364aa42a9f2af88ebdc26"} Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.873224 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"64c2cd67-7241-451a-8002-2cf34bfccd66","Type":"ContainerStarted","Data":"953e5aa2abeba459aed01ff7c56aad0fff93023dad7689d774e0c66ccf4b3866"} Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.875204 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5767fbd5b4-jmjzp" event={"ID":"caa7b547-96ec-4119-87b1-fa14697ba9d1","Type":"ContainerStarted","Data":"de3a57e387f1582f6570d0c5f5e659d725722d24b50d157c852c9079752bde22"} Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.875309 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5767fbd5b4-jmjzp" event={"ID":"caa7b547-96ec-4119-87b1-fa14697ba9d1","Type":"ContainerStarted","Data":"fe383ba7337414ab89ed6f97c783d772b2280604308842589f610caf6302960b"} Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.875467 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.907031 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.906996139 podStartE2EDuration="4.906996139s" podCreationTimestamp="2025-11-28 16:33:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:09.902220049 +0000 UTC m=+1343.293888600" watchObservedRunningTime="2025-11-28 16:33:09.906996139 +0000 UTC m=+1343.298664680" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.934812 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-5767fbd5b4-jmjzp" podStartSLOduration=2.934796678 podStartE2EDuration="2.934796678s" podCreationTimestamp="2025-11-28 16:33:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:09.92380207 +0000 UTC m=+1343.315470611" watchObservedRunningTime="2025-11-28 16:33:09.934796678 +0000 UTC m=+1343.326465219" Nov 28 16:33:09 crc kubenswrapper[4954]: I1128 16:33:09.963672 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.963634169 podStartE2EDuration="4.963634169s" podCreationTimestamp="2025-11-28 16:33:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:09.960945114 +0000 UTC m=+1343.352613655" watchObservedRunningTime="2025-11-28 16:33:09.963634169 +0000 UTC 
m=+1343.355302700" Nov 28 16:33:12 crc kubenswrapper[4954]: I1128 16:33:12.907966 4954 generic.go:334] "Generic (PLEG): container finished" podID="c10c6844-a43f-4167-90af-673636f4006b" containerID="d5248dbf6c49a21e5ab30bb4fdda938ceedcde3b2dae112a6d10da5d1ac67592" exitCode=0 Nov 28 16:33:12 crc kubenswrapper[4954]: I1128 16:33:12.908042 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sqc29" event={"ID":"c10c6844-a43f-4167-90af-673636f4006b","Type":"ContainerDied","Data":"d5248dbf6c49a21e5ab30bb4fdda938ceedcde3b2dae112a6d10da5d1ac67592"} Nov 28 16:33:13 crc kubenswrapper[4954]: I1128 16:33:13.190302 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-89b6465dd-zsqfd"] Nov 28 16:33:13 crc kubenswrapper[4954]: I1128 16:33:13.920022 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8d4de09-a431-4eae-9ac8-23a33e1039b4","Type":"ContainerStarted","Data":"d2eb138f847e509bd3995e2f89d03d76cfe5d75e47f668141de3240521fbd833"} Nov 28 16:33:13 crc kubenswrapper[4954]: I1128 16:33:13.922564 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-89b6465dd-zsqfd" event={"ID":"78bd4de4-5601-4771-b15c-c240e097519b","Type":"ContainerStarted","Data":"b9815c657f848312db17e79942f5eddf0b33ee714edd4d18a3accefb78385c5d"} Nov 28 16:33:13 crc kubenswrapper[4954]: I1128 16:33:13.922609 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-89b6465dd-zsqfd" event={"ID":"78bd4de4-5601-4771-b15c-c240e097519b","Type":"ContainerStarted","Data":"25d180f3bd81c9004a4db6ad4e61d48011cac275e60bfe32015be950a41aedcd"} Nov 28 16:33:13 crc kubenswrapper[4954]: I1128 16:33:13.922623 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-89b6465dd-zsqfd" event={"ID":"78bd4de4-5601-4771-b15c-c240e097519b","Type":"ContainerStarted","Data":"88183e5b8a8e4859701caec0aed68ac6316a8e0641d681a574641c9b87e4d96f"} Nov 28 16:33:13 crc kubenswrapper[4954]: I1128 16:33:13.922864 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:13 crc kubenswrapper[4954]: I1128 16:33:13.923115 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:13 crc kubenswrapper[4954]: I1128 16:33:13.924366 4954 generic.go:334] "Generic (PLEG): container finished" podID="e78f971d-0af3-4789-a73a-b57790eb3dfa" containerID="9c6096808e547acb9a0bfe8d3ba36953837f47d196dc23225e34634802657a07" exitCode=0 Nov 28 16:33:13 crc kubenswrapper[4954]: I1128 16:33:13.924443 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-dblvm" event={"ID":"e78f971d-0af3-4789-a73a-b57790eb3dfa","Type":"ContainerDied","Data":"9c6096808e547acb9a0bfe8d3ba36953837f47d196dc23225e34634802657a07"} Nov 28 16:33:13 crc kubenswrapper[4954]: I1128 16:33:13.952565 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-89b6465dd-zsqfd" podStartSLOduration=5.952517486 podStartE2EDuration="5.952517486s" podCreationTimestamp="2025-11-28 16:33:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:13.945664979 +0000 UTC m=+1347.337333530" watchObservedRunningTime="2025-11-28 16:33:13.952517486 +0000 UTC m=+1347.344186027" Nov 28 16:33:14 crc kubenswrapper[4954]: I1128 16:33:14.275701 4954 util.go:48] "No ready sandbox for 
pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-sqc29" Nov 28 16:33:14 crc kubenswrapper[4954]: I1128 16:33:14.346583 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzstd\" (UniqueName: \"kubernetes.io/projected/c10c6844-a43f-4167-90af-673636f4006b-kube-api-access-tzstd\") pod \"c10c6844-a43f-4167-90af-673636f4006b\" (UID: \"c10c6844-a43f-4167-90af-673636f4006b\") " Nov 28 16:33:14 crc kubenswrapper[4954]: I1128 16:33:14.346673 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10c6844-a43f-4167-90af-673636f4006b-combined-ca-bundle\") pod \"c10c6844-a43f-4167-90af-673636f4006b\" (UID: \"c10c6844-a43f-4167-90af-673636f4006b\") " Nov 28 16:33:14 crc kubenswrapper[4954]: I1128 16:33:14.346795 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c10c6844-a43f-4167-90af-673636f4006b-db-sync-config-data\") pod \"c10c6844-a43f-4167-90af-673636f4006b\" (UID: \"c10c6844-a43f-4167-90af-673636f4006b\") " Nov 28 16:33:14 crc kubenswrapper[4954]: I1128 16:33:14.351961 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c10c6844-a43f-4167-90af-673636f4006b-kube-api-access-tzstd" (OuterVolumeSpecName: "kube-api-access-tzstd") pod "c10c6844-a43f-4167-90af-673636f4006b" (UID: "c10c6844-a43f-4167-90af-673636f4006b"). InnerVolumeSpecName "kube-api-access-tzstd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:14 crc kubenswrapper[4954]: I1128 16:33:14.353740 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c10c6844-a43f-4167-90af-673636f4006b-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c10c6844-a43f-4167-90af-673636f4006b" (UID: "c10c6844-a43f-4167-90af-673636f4006b"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:14 crc kubenswrapper[4954]: I1128 16:33:14.373609 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c10c6844-a43f-4167-90af-673636f4006b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c10c6844-a43f-4167-90af-673636f4006b" (UID: "c10c6844-a43f-4167-90af-673636f4006b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:14 crc kubenswrapper[4954]: I1128 16:33:14.449021 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzstd\" (UniqueName: \"kubernetes.io/projected/c10c6844-a43f-4167-90af-673636f4006b-kube-api-access-tzstd\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:14 crc kubenswrapper[4954]: I1128 16:33:14.449051 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10c6844-a43f-4167-90af-673636f4006b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:14 crc kubenswrapper[4954]: I1128 16:33:14.449060 4954 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c10c6844-a43f-4167-90af-673636f4006b-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:14 crc kubenswrapper[4954]: I1128 16:33:14.939504 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-sqc29" event={"ID":"c10c6844-a43f-4167-90af-673636f4006b","Type":"ContainerDied","Data":"e770cb873bbd65b1dce1e0071eb105ac6e5d38cdff0ac8d9f0cd6562070a4581"} Nov 28 16:33:14 crc kubenswrapper[4954]: I1128 16:33:14.939608 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e770cb873bbd65b1dce1e0071eb105ac6e5d38cdff0ac8d9f0cd6562070a4581" Nov 28 16:33:14 crc kubenswrapper[4954]: I1128 16:33:14.939980 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-sqc29" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.181326 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-5b74b79fdc-99274"] Nov 28 16:33:15 crc kubenswrapper[4954]: E1128 16:33:15.182024 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c10c6844-a43f-4167-90af-673636f4006b" containerName="barbican-db-sync" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.182036 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="c10c6844-a43f-4167-90af-673636f4006b" containerName="barbican-db-sync" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.182199 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="c10c6844-a43f-4167-90af-673636f4006b" containerName="barbican-db-sync" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.183240 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.186293 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-7vnbv" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.197449 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.197844 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.198760 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5b74b79fdc-99274"] Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.264965 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dl2j9\" (UniqueName: \"kubernetes.io/projected/9023ad7d-6621-4ed8-aec4-bd1d0db53088-kube-api-access-dl2j9\") pod \"barbican-worker-5b74b79fdc-99274\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.265026 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9023ad7d-6621-4ed8-aec4-bd1d0db53088-logs\") pod \"barbican-worker-5b74b79fdc-99274\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.265054 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-combined-ca-bundle\") pod \"barbican-worker-5b74b79fdc-99274\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.265371 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-config-data-custom\") pod \"barbican-worker-5b74b79fdc-99274\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.265484 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-config-data\") pod \"barbican-worker-5b74b79fdc-99274\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.281506 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-64f4444cdb-4hdcg"] Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.283436 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.285621 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.291625 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-64f4444cdb-4hdcg"] Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.325995 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-dblvm" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.335375 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-mcjkc"] Nov 28 16:33:15 crc kubenswrapper[4954]: E1128 16:33:15.335939 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e78f971d-0af3-4789-a73a-b57790eb3dfa" containerName="neutron-db-sync" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.335957 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="e78f971d-0af3-4789-a73a-b57790eb3dfa" containerName="neutron-db-sync" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.336137 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="e78f971d-0af3-4789-a73a-b57790eb3dfa" containerName="neutron-db-sync" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.337083 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.353368 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-mcjkc"] Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.405696 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-combined-ca-bundle\") pod \"barbican-keystone-listener-64f4444cdb-4hdcg\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.405797 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-config-data-custom\") pod \"barbican-worker-5b74b79fdc-99274\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.405826 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-config-data\") pod \"barbican-keystone-listener-64f4444cdb-4hdcg\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.406133 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-config-data\") pod \"barbican-worker-5b74b79fdc-99274\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.406177 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70613221-3087-4dc3-9f41-86eb6fe88041-logs\") pod \"barbican-keystone-listener-64f4444cdb-4hdcg\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.406261 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dl2j9\" (UniqueName: \"kubernetes.io/projected/9023ad7d-6621-4ed8-aec4-bd1d0db53088-kube-api-access-dl2j9\") pod \"barbican-worker-5b74b79fdc-99274\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.406317 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lclcp\" (UniqueName: \"kubernetes.io/projected/70613221-3087-4dc3-9f41-86eb6fe88041-kube-api-access-lclcp\") pod \"barbican-keystone-listener-64f4444cdb-4hdcg\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.406345 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9023ad7d-6621-4ed8-aec4-bd1d0db53088-logs\") pod \"barbican-worker-5b74b79fdc-99274\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.406432 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-combined-ca-bundle\") pod \"barbican-worker-5b74b79fdc-99274\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.406481 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-config-data-custom\") pod \"barbican-keystone-listener-64f4444cdb-4hdcg\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.410423 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9023ad7d-6621-4ed8-aec4-bd1d0db53088-logs\") pod \"barbican-worker-5b74b79fdc-99274\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.413705 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-config-data-custom\") pod \"barbican-worker-5b74b79fdc-99274\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.432327 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-config-data\") pod \"barbican-worker-5b74b79fdc-99274\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 
16:33:15.437371 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-combined-ca-bundle\") pod \"barbican-worker-5b74b79fdc-99274\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.446461 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dl2j9\" (UniqueName: \"kubernetes.io/projected/9023ad7d-6621-4ed8-aec4-bd1d0db53088-kube-api-access-dl2j9\") pod \"barbican-worker-5b74b79fdc-99274\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.503060 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-786547d6db-f76dj"] Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.508745 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e78f971d-0af3-4789-a73a-b57790eb3dfa-config\") pod \"e78f971d-0af3-4789-a73a-b57790eb3dfa\" (UID: \"e78f971d-0af3-4789-a73a-b57790eb3dfa\") " Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.516739 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vw7k4\" (UniqueName: \"kubernetes.io/projected/e78f971d-0af3-4789-a73a-b57790eb3dfa-kube-api-access-vw7k4\") pod \"e78f971d-0af3-4789-a73a-b57790eb3dfa\" (UID: \"e78f971d-0af3-4789-a73a-b57790eb3dfa\") " Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.516886 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e78f971d-0af3-4789-a73a-b57790eb3dfa-combined-ca-bundle\") pod \"e78f971d-0af3-4789-a73a-b57790eb3dfa\" (UID: \"e78f971d-0af3-4789-a73a-b57790eb3dfa\") " Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.517590 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-ovsdbserver-nb\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.517651 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-config-data\") pod \"barbican-keystone-listener-64f4444cdb-4hdcg\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.517756 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70613221-3087-4dc3-9f41-86eb6fe88041-logs\") pod \"barbican-keystone-listener-64f4444cdb-4hdcg\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.517794 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-dns-svc\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " 
pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.517923 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lclcp\" (UniqueName: \"kubernetes.io/projected/70613221-3087-4dc3-9f41-86eb6fe88041-kube-api-access-lclcp\") pod \"barbican-keystone-listener-64f4444cdb-4hdcg\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.517974 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-config-data-custom\") pod \"barbican-keystone-listener-64f4444cdb-4hdcg\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.518046 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-config\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.518097 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-dns-swift-storage-0\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.518128 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-ovsdbserver-sb\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.518152 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jpjp\" (UniqueName: \"kubernetes.io/projected/59a8644a-94d1-4074-8285-fc1e974dfae5-kube-api-access-2jpjp\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.518233 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-combined-ca-bundle\") pod \"barbican-keystone-listener-64f4444cdb-4hdcg\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.518787 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70613221-3087-4dc3-9f41-86eb6fe88041-logs\") pod \"barbican-keystone-listener-64f4444cdb-4hdcg\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.520789 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-786547d6db-f76dj"] Nov 28 16:33:15 crc 
kubenswrapper[4954]: I1128 16:33:15.520988 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.524228 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-config-data-custom\") pod \"barbican-keystone-listener-64f4444cdb-4hdcg\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.524434 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.531890 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-config-data\") pod \"barbican-keystone-listener-64f4444cdb-4hdcg\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.539549 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-combined-ca-bundle\") pod \"barbican-keystone-listener-64f4444cdb-4hdcg\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.544723 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lclcp\" (UniqueName: \"kubernetes.io/projected/70613221-3087-4dc3-9f41-86eb6fe88041-kube-api-access-lclcp\") pod \"barbican-keystone-listener-64f4444cdb-4hdcg\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.548638 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e78f971d-0af3-4789-a73a-b57790eb3dfa-kube-api-access-vw7k4" (OuterVolumeSpecName: "kube-api-access-vw7k4") pod "e78f971d-0af3-4789-a73a-b57790eb3dfa" (UID: "e78f971d-0af3-4789-a73a-b57790eb3dfa"). InnerVolumeSpecName "kube-api-access-vw7k4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.569512 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e78f971d-0af3-4789-a73a-b57790eb3dfa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e78f971d-0af3-4789-a73a-b57790eb3dfa" (UID: "e78f971d-0af3-4789-a73a-b57790eb3dfa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.581611 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e78f971d-0af3-4789-a73a-b57790eb3dfa-config" (OuterVolumeSpecName: "config") pod "e78f971d-0af3-4789-a73a-b57790eb3dfa" (UID: "e78f971d-0af3-4789-a73a-b57790eb3dfa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.604342 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.620301 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-config\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.620559 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jpjp\" (UniqueName: \"kubernetes.io/projected/59a8644a-94d1-4074-8285-fc1e974dfae5-kube-api-access-2jpjp\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.620644 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-dns-swift-storage-0\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.620925 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-ovsdbserver-sb\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.621007 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxfkk\" (UniqueName: \"kubernetes.io/projected/6af9722b-be10-4c3e-9c73-96b114678d9d-kube-api-access-vxfkk\") pod \"barbican-api-786547d6db-f76dj\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") " pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.621210 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6af9722b-be10-4c3e-9c73-96b114678d9d-logs\") pod \"barbican-api-786547d6db-f76dj\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") " pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.621302 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-ovsdbserver-nb\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.621412 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-dns-svc\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.621515 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-config-data-custom\") pod \"barbican-api-786547d6db-f76dj\" (UID: 
\"6af9722b-be10-4c3e-9c73-96b114678d9d\") " pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.621618 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-config-data\") pod \"barbican-api-786547d6db-f76dj\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") " pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.621707 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-combined-ca-bundle\") pod \"barbican-api-786547d6db-f76dj\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") " pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.621822 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/e78f971d-0af3-4789-a73a-b57790eb3dfa-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.621991 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vw7k4\" (UniqueName: \"kubernetes.io/projected/e78f971d-0af3-4789-a73a-b57790eb3dfa-kube-api-access-vw7k4\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.622065 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e78f971d-0af3-4789-a73a-b57790eb3dfa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.622404 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-ovsdbserver-nb\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.622420 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-dns-swift-storage-0\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.622611 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-ovsdbserver-sb\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.623330 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-dns-svc\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.628736 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-config\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " 
pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.646255 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jpjp\" (UniqueName: \"kubernetes.io/projected/59a8644a-94d1-4074-8285-fc1e974dfae5-kube-api-access-2jpjp\") pod \"dnsmasq-dns-7c67bffd47-mcjkc\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.726600 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxfkk\" (UniqueName: \"kubernetes.io/projected/6af9722b-be10-4c3e-9c73-96b114678d9d-kube-api-access-vxfkk\") pod \"barbican-api-786547d6db-f76dj\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") " pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.726659 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6af9722b-be10-4c3e-9c73-96b114678d9d-logs\") pod \"barbican-api-786547d6db-f76dj\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") " pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.726808 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-config-data-custom\") pod \"barbican-api-786547d6db-f76dj\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") " pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.726842 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-config-data\") pod \"barbican-api-786547d6db-f76dj\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") " pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.726869 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-combined-ca-bundle\") pod \"barbican-api-786547d6db-f76dj\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") " pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.728474 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6af9722b-be10-4c3e-9c73-96b114678d9d-logs\") pod \"barbican-api-786547d6db-f76dj\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") " pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.733492 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-config-data-custom\") pod \"barbican-api-786547d6db-f76dj\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") " pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.733971 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-combined-ca-bundle\") pod \"barbican-api-786547d6db-f76dj\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") " pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc 
kubenswrapper[4954]: I1128 16:33:15.734081 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-config-data\") pod \"barbican-api-786547d6db-f76dj\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") " pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.749731 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxfkk\" (UniqueName: \"kubernetes.io/projected/6af9722b-be10-4c3e-9c73-96b114678d9d-kube-api-access-vxfkk\") pod \"barbican-api-786547d6db-f76dj\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") " pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.819763 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.856261 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.865031 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.980324 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-dblvm" event={"ID":"e78f971d-0af3-4789-a73a-b57790eb3dfa","Type":"ContainerDied","Data":"7dec7c1deaf936f88a2ffc6a4678ca1a783251b85e4d0131fe27418b1d22d287"} Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.980364 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7dec7c1deaf936f88a2ffc6a4678ca1a783251b85e4d0131fe27418b1d22d287" Nov 28 16:33:15 crc kubenswrapper[4954]: I1128 16:33:15.980451 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-dblvm" Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.085676 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5b74b79fdc-99274"] Nov 28 16:33:16 crc kubenswrapper[4954]: W1128 16:33:16.101616 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9023ad7d_6621_4ed8_aec4_bd1d0db53088.slice/crio-8455f37c6e73a768d424f4c43e7120c1cd2c6f075de8e2b4ab571f22475d3bf5 WatchSource:0}: Error finding container 8455f37c6e73a768d424f4c43e7120c1cd2c6f075de8e2b4ab571f22475d3bf5: Status 404 returned error can't find the container with id 8455f37c6e73a768d424f4c43e7120c1cd2c6f075de8e2b4ab571f22475d3bf5 Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.203276 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-mcjkc"] Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.275976 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-gb5zf"] Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.285814 4954 util.go:30] "No sandbox for pod can be found. 
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.287703 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-gb5zf"]
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.309821 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.310790 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.343836 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-config\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.343873 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.343912 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.343933 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.343958 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.344020 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r79tz\" (UniqueName: \"kubernetes.io/projected/d4f6d649-427f-4c3f-b541-67e3f529e218-kube-api-access-r79tz\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.352241 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.353343 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:16 crc kubenswrapper[4954]: W1128 16:33:16.359671 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod70613221_3087_4dc3_9f41_86eb6fe88041.slice/crio-40cc15a3ffab4757a656853fe0b94cc825ab38bc670397b184e94b66d46182cf WatchSource:0}: Error finding container 40cc15a3ffab4757a656853fe0b94cc825ab38bc670397b184e94b66d46182cf: Status 404 returned error can't find the container with id 40cc15a3ffab4757a656853fe0b94cc825ab38bc670397b184e94b66d46182cf
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.360296 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.363941 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-64f4444cdb-4hdcg"]
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.375463 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.428069 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.450905 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-config\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.450949 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.450999 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.451018 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.451056 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.451124 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r79tz\" (UniqueName: \"kubernetes.io/projected/d4f6d649-427f-4c3f-b541-67e3f529e218-kube-api-access-r79tz\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.453048 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-config\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.453593 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.454354 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.454939 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.455818 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.468690 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-mcjkc"]
Nov 28 16:33:16 crc kubenswrapper[4954]: W1128 16:33:16.493703 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59a8644a_94d1_4074_8285_fc1e974dfae5.slice/crio-6d9e2ff57b049f912f7f5d43bce7026e1efd2a2ffa7d9a8ccd44eb3be7752f92 WatchSource:0}: Error finding container 6d9e2ff57b049f912f7f5d43bce7026e1efd2a2ffa7d9a8ccd44eb3be7752f92: Status 404 returned error can't find the container with id 6d9e2ff57b049f912f7f5d43bce7026e1efd2a2ffa7d9a8ccd44eb3be7752f92
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.494581 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.494911 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-67c7f88868-224xn"]
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.496682 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-67c7f88868-224xn"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.504999 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.505060 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r79tz\" (UniqueName: \"kubernetes.io/projected/d4f6d649-427f-4c3f-b541-67e3f529e218-kube-api-access-r79tz\") pod \"dnsmasq-dns-848cf88cfc-gb5zf\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.505069 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-lrht9"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.505165 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.504999 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.511822 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-67c7f88868-224xn"]
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.628371 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-786547d6db-f76dj"]
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.642764 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.654833 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-combined-ca-bundle\") pod \"neutron-67c7f88868-224xn\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.655078 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-httpd-config\") pod \"neutron-67c7f88868-224xn\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.655151 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-ovndb-tls-certs\") pod \"neutron-67c7f88868-224xn\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.655193 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-config\") pod \"neutron-67c7f88868-224xn\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn"
Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.655366 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sb44n\" (UniqueName: \"kubernetes.io/projected/7ee4f765-e243-4661-b118-dbf3414cfb2e-kube-api-access-sb44n\") pod \"neutron-67c7f88868-224xn\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn"
\"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.756718 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-combined-ca-bundle\") pod \"neutron-67c7f88868-224xn\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.756786 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-httpd-config\") pod \"neutron-67c7f88868-224xn\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.756819 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-ovndb-tls-certs\") pod \"neutron-67c7f88868-224xn\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.756838 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-config\") pod \"neutron-67c7f88868-224xn\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.756877 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sb44n\" (UniqueName: \"kubernetes.io/projected/7ee4f765-e243-4661-b118-dbf3414cfb2e-kube-api-access-sb44n\") pod \"neutron-67c7f88868-224xn\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.770373 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-httpd-config\") pod \"neutron-67c7f88868-224xn\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.770399 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-ovndb-tls-certs\") pod \"neutron-67c7f88868-224xn\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.771095 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-combined-ca-bundle\") pod \"neutron-67c7f88868-224xn\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.771647 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-config\") pod \"neutron-67c7f88868-224xn\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.776987 4954 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-sb44n\" (UniqueName: \"kubernetes.io/projected/7ee4f765-e243-4661-b118-dbf3414cfb2e-kube-api-access-sb44n\") pod \"neutron-67c7f88868-224xn\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.969174 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-gb5zf"] Nov 28 16:33:16 crc kubenswrapper[4954]: W1128 16:33:16.980368 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd4f6d649_427f_4c3f_b541_67e3f529e218.slice/crio-ee1c872899ea27cffac0775bc47db274c9376471381656dcc1b236bcdec4056b WatchSource:0}: Error finding container ee1c872899ea27cffac0775bc47db274c9376471381656dcc1b236bcdec4056b: Status 404 returned error can't find the container with id ee1c872899ea27cffac0775bc47db274c9376471381656dcc1b236bcdec4056b Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.989301 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" event={"ID":"70613221-3087-4dc3-9f41-86eb6fe88041","Type":"ContainerStarted","Data":"40cc15a3ffab4757a656853fe0b94cc825ab38bc670397b184e94b66d46182cf"} Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.990645 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf" event={"ID":"d4f6d649-427f-4c3f-b541-67e3f529e218","Type":"ContainerStarted","Data":"ee1c872899ea27cffac0775bc47db274c9376471381656dcc1b236bcdec4056b"} Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.992302 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-v7lkp" event={"ID":"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e","Type":"ContainerStarted","Data":"f657b195bba8965fc3348f96e5b3055b8c89ded434fec2bb372b073bc85deee0"} Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.993749 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5b74b79fdc-99274" event={"ID":"9023ad7d-6621-4ed8-aec4-bd1d0db53088","Type":"ContainerStarted","Data":"8455f37c6e73a768d424f4c43e7120c1cd2c6f075de8e2b4ab571f22475d3bf5"} Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.995963 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-786547d6db-f76dj" event={"ID":"6af9722b-be10-4c3e-9c73-96b114678d9d","Type":"ContainerStarted","Data":"8aef752383c486bd8b446382fa11b0788bce12382196e6716a9ed891dd59abcb"} Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.995994 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-786547d6db-f76dj" event={"ID":"6af9722b-be10-4c3e-9c73-96b114678d9d","Type":"ContainerStarted","Data":"29ee47ca0e68ab917232eb38a608a4f182545ca8915533e473940fb09fb1aea6"} Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.999015 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" podUID="59a8644a-94d1-4074-8285-fc1e974dfae5" containerName="init" containerID="cri-o://c24645c471a6617b0c07e187c018d929e5ff5eeff30b71605439faefbe67c6de" gracePeriod=10 Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.999257 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" 
event={"ID":"59a8644a-94d1-4074-8285-fc1e974dfae5","Type":"ContainerStarted","Data":"c24645c471a6617b0c07e187c018d929e5ff5eeff30b71605439faefbe67c6de"} Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.999284 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" event={"ID":"59a8644a-94d1-4074-8285-fc1e974dfae5","Type":"ContainerStarted","Data":"6d9e2ff57b049f912f7f5d43bce7026e1efd2a2ffa7d9a8ccd44eb3be7752f92"} Nov 28 16:33:16 crc kubenswrapper[4954]: I1128 16:33:16.999301 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:17 crc kubenswrapper[4954]: I1128 16:33:17.001143 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 16:33:17 crc kubenswrapper[4954]: I1128 16:33:17.001807 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 16:33:17 crc kubenswrapper[4954]: I1128 16:33:17.001822 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 16:33:17 crc kubenswrapper[4954]: I1128 16:33:17.011901 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:17 crc kubenswrapper[4954]: I1128 16:33:17.027211 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-v7lkp" podStartSLOduration=3.6516712 podStartE2EDuration="44.027184149s" podCreationTimestamp="2025-11-28 16:32:33 +0000 UTC" firstStartedPulling="2025-11-28 16:32:35.07481723 +0000 UTC m=+1308.466485771" lastFinishedPulling="2025-11-28 16:33:15.450330179 +0000 UTC m=+1348.841998720" observedRunningTime="2025-11-28 16:33:17.00885884 +0000 UTC m=+1350.400527391" watchObservedRunningTime="2025-11-28 16:33:17.027184149 +0000 UTC m=+1350.418852710" Nov 28 16:33:17 crc kubenswrapper[4954]: I1128 16:33:17.613290 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-67c7f88868-224xn"] Nov 28 16:33:18 crc kubenswrapper[4954]: I1128 16:33:18.032719 4954 generic.go:334] "Generic (PLEG): container finished" podID="59a8644a-94d1-4074-8285-fc1e974dfae5" containerID="c24645c471a6617b0c07e187c018d929e5ff5eeff30b71605439faefbe67c6de" exitCode=0 Nov 28 16:33:18 crc kubenswrapper[4954]: I1128 16:33:18.033000 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" event={"ID":"59a8644a-94d1-4074-8285-fc1e974dfae5","Type":"ContainerDied","Data":"c24645c471a6617b0c07e187c018d929e5ff5eeff30b71605439faefbe67c6de"} Nov 28 16:33:18 crc kubenswrapper[4954]: I1128 16:33:18.052023 4954 generic.go:334] "Generic (PLEG): container finished" podID="d4f6d649-427f-4c3f-b541-67e3f529e218" containerID="7bf1e67c3967f633fafc7654998fcb77d65da7f515e548149bcc08b306834f9d" exitCode=0 Nov 28 16:33:18 crc kubenswrapper[4954]: I1128 16:33:18.052093 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf" event={"ID":"d4f6d649-427f-4c3f-b541-67e3f529e218","Type":"ContainerDied","Data":"7bf1e67c3967f633fafc7654998fcb77d65da7f515e548149bcc08b306834f9d"} Nov 28 16:33:18 crc kubenswrapper[4954]: I1128 16:33:18.063044 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67c7f88868-224xn" 
event={"ID":"7ee4f765-e243-4661-b118-dbf3414cfb2e","Type":"ContainerStarted","Data":"0a4b1d579fc826aa27964640df0e192ce8cba1c6c97f0f55b4d56c3bea9bcd84"} Nov 28 16:33:18 crc kubenswrapper[4954]: I1128 16:33:18.070543 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-786547d6db-f76dj" event={"ID":"6af9722b-be10-4c3e-9c73-96b114678d9d","Type":"ContainerStarted","Data":"c3793f97728a1daa5adad8b0318266a145058dfb78f7af045a770a6b937dc049"} Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.082834 4954 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.083393 4954 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.082875 4954 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.083489 4954 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.082933 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.083640 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.118327 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-786547d6db-f76dj" podStartSLOduration=4.118306077 podStartE2EDuration="4.118306077s" podCreationTimestamp="2025-11-28 16:33:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:19.118196354 +0000 UTC m=+1352.509864905" watchObservedRunningTime="2025-11-28 16:33:19.118306077 +0000 UTC m=+1352.509974618" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.259138 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-bdd54468f-gzf48"] Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.260779 4954 util.go:30] "No sandbox for pod can be found. 
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.265654 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc"
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.273999 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc"
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.283475 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-bdd54468f-gzf48"]
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.343457 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-config\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48"
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.343551 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p27vt\" (UniqueName: \"kubernetes.io/projected/6ce804c9-edba-4404-9099-4c0f102aa1b2-kube-api-access-p27vt\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48"
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.343620 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-internal-tls-certs\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48"
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.343714 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-combined-ca-bundle\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48"
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.343740 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-public-tls-certs\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48"
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.343789 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-ovndb-tls-certs\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48"
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.343819 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-httpd-config\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48"
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.446940 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-combined-ca-bundle\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48"
\"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-combined-ca-bundle\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.447273 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-public-tls-certs\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.447333 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-ovndb-tls-certs\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.447386 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-httpd-config\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.447503 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-config\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.447597 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p27vt\" (UniqueName: \"kubernetes.io/projected/6ce804c9-edba-4404-9099-4c0f102aa1b2-kube-api-access-p27vt\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.447689 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-internal-tls-certs\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.453884 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-combined-ca-bundle\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.455208 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-internal-tls-certs\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.457640 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-httpd-config\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " 
pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.458252 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-ovndb-tls-certs\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.461459 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-public-tls-certs\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.463952 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-config\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.466078 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.481175 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p27vt\" (UniqueName: \"kubernetes.io/projected/6ce804c9-edba-4404-9099-4c0f102aa1b2-kube-api-access-p27vt\") pod \"neutron-bdd54468f-gzf48\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.495677 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.549270 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jpjp\" (UniqueName: \"kubernetes.io/projected/59a8644a-94d1-4074-8285-fc1e974dfae5-kube-api-access-2jpjp\") pod \"59a8644a-94d1-4074-8285-fc1e974dfae5\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.549343 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-ovsdbserver-nb\") pod \"59a8644a-94d1-4074-8285-fc1e974dfae5\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.549436 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-dns-swift-storage-0\") pod \"59a8644a-94d1-4074-8285-fc1e974dfae5\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.549463 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-dns-svc\") pod \"59a8644a-94d1-4074-8285-fc1e974dfae5\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.549504 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-config\") pod \"59a8644a-94d1-4074-8285-fc1e974dfae5\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.549562 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-ovsdbserver-sb\") pod \"59a8644a-94d1-4074-8285-fc1e974dfae5\" (UID: \"59a8644a-94d1-4074-8285-fc1e974dfae5\") " Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.554220 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59a8644a-94d1-4074-8285-fc1e974dfae5-kube-api-access-2jpjp" (OuterVolumeSpecName: "kube-api-access-2jpjp") pod "59a8644a-94d1-4074-8285-fc1e974dfae5" (UID: "59a8644a-94d1-4074-8285-fc1e974dfae5"). InnerVolumeSpecName "kube-api-access-2jpjp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.578397 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "59a8644a-94d1-4074-8285-fc1e974dfae5" (UID: "59a8644a-94d1-4074-8285-fc1e974dfae5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.589082 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "59a8644a-94d1-4074-8285-fc1e974dfae5" (UID: "59a8644a-94d1-4074-8285-fc1e974dfae5"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.589514 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "59a8644a-94d1-4074-8285-fc1e974dfae5" (UID: "59a8644a-94d1-4074-8285-fc1e974dfae5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.592382 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "59a8644a-94d1-4074-8285-fc1e974dfae5" (UID: "59a8644a-94d1-4074-8285-fc1e974dfae5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.595979 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-config" (OuterVolumeSpecName: "config") pod "59a8644a-94d1-4074-8285-fc1e974dfae5" (UID: "59a8644a-94d1-4074-8285-fc1e974dfae5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.603086 4954 util.go:30] "No sandbox for pod can be found. 
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.652363 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jpjp\" (UniqueName: \"kubernetes.io/projected/59a8644a-94d1-4074-8285-fc1e974dfae5-kube-api-access-2jpjp\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.652401 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.652410 4954 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.652423 4954 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.652433 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.652440 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59a8644a-94d1-4074-8285-fc1e974dfae5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.874572 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:19 crc kubenswrapper[4954]: I1128 16:33:19.996693 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 28 16:33:20 crc kubenswrapper[4954]: I1128 16:33:20.097264 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67c7f88868-224xn" event={"ID":"7ee4f765-e243-4661-b118-dbf3414cfb2e","Type":"ContainerStarted","Data":"a0dd28b9942fb8fdc120a20870acce41f7cec85c79b45f3505224a17e9537cbd"}
Nov 28 16:33:20 crc kubenswrapper[4954]: I1128 16:33:20.105321 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc"
Nov 28 16:33:20 crc kubenswrapper[4954]: I1128 16:33:20.105392 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-mcjkc" event={"ID":"59a8644a-94d1-4074-8285-fc1e974dfae5","Type":"ContainerDied","Data":"6d9e2ff57b049f912f7f5d43bce7026e1efd2a2ffa7d9a8ccd44eb3be7752f92"}
Nov 28 16:33:20 crc kubenswrapper[4954]: I1128 16:33:20.105437 4954 scope.go:117] "RemoveContainer" containerID="c24645c471a6617b0c07e187c018d929e5ff5eeff30b71605439faefbe67c6de"
Nov 28 16:33:20 crc kubenswrapper[4954]: I1128 16:33:20.108481 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 28 16:33:20 crc kubenswrapper[4954]: I1128 16:33:20.110881 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf" event={"ID":"d4f6d649-427f-4c3f-b541-67e3f529e218","Type":"ContainerStarted","Data":"c0637cf4f8c41fde1febf14d841ba1af4f1baa489f4900f64fb3bbec798b9cb8"}
Nov 28 16:33:20 crc kubenswrapper[4954]: I1128 16:33:20.110944 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf"
Nov 28 16:33:20 crc kubenswrapper[4954]: I1128 16:33:20.169880 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf" podStartSLOduration=4.16986613 podStartE2EDuration="4.16986613s" podCreationTimestamp="2025-11-28 16:33:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:20.145729028 +0000 UTC m=+1353.537397579" watchObservedRunningTime="2025-11-28 16:33:20.16986613 +0000 UTC m=+1353.561534671"
Nov 28 16:33:20 crc kubenswrapper[4954]: I1128 16:33:20.224513 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-mcjkc"]
Nov 28 16:33:20 crc kubenswrapper[4954]: I1128 16:33:20.240612 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-mcjkc"]
Nov 28 16:33:20 crc kubenswrapper[4954]: I1128 16:33:20.265679 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-bdd54468f-gzf48"]
Nov 28 16:33:21 crc kubenswrapper[4954]: I1128 16:33:21.121501 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bdd54468f-gzf48" event={"ID":"6ce804c9-edba-4404-9099-4c0f102aa1b2","Type":"ContainerStarted","Data":"6247dea11c8baa76d5504944acc0e1d5678e3686708ccb7d89f295eda08a5665"}
Nov 28 16:33:21 crc kubenswrapper[4954]: I1128 16:33:21.935338 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59a8644a-94d1-4074-8285-fc1e974dfae5" path="/var/lib/kubelet/pods/59a8644a-94d1-4074-8285-fc1e974dfae5/volumes"
Nov 28 16:33:21 crc kubenswrapper[4954]: I1128 16:33:21.948773 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-68f67467f4-8bd8x"]
Nov 28 16:33:21 crc kubenswrapper[4954]: E1128 16:33:21.949226 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59a8644a-94d1-4074-8285-fc1e974dfae5" containerName="init"
Nov 28 16:33:21 crc kubenswrapper[4954]: I1128 16:33:21.949244 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="59a8644a-94d1-4074-8285-fc1e974dfae5" containerName="init"
Nov 28 16:33:21 crc kubenswrapper[4954]: I1128 16:33:21.949495 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="59a8644a-94d1-4074-8285-fc1e974dfae5" containerName="init"
podUID="59a8644a-94d1-4074-8285-fc1e974dfae5" containerName="init" Nov 28 16:33:21 crc kubenswrapper[4954]: I1128 16:33:21.950636 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:21 crc kubenswrapper[4954]: I1128 16:33:21.953893 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 28 16:33:21 crc kubenswrapper[4954]: I1128 16:33:21.954101 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 28 16:33:21 crc kubenswrapper[4954]: I1128 16:33:21.956919 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-68f67467f4-8bd8x"] Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.112740 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-public-tls-certs\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.112817 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-internal-tls-certs\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.112973 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-combined-ca-bundle\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.113143 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-config-data-custom\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.113175 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45c8821a-baab-4e3c-8ffb-f4fe71722666-logs\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.113193 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bx4zq\" (UniqueName: \"kubernetes.io/projected/45c8821a-baab-4e3c-8ffb-f4fe71722666-kube-api-access-bx4zq\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.113298 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-config-data\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: 
\"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.215552 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-internal-tls-certs\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.215630 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-combined-ca-bundle\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.215750 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-config-data-custom\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.215782 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45c8821a-baab-4e3c-8ffb-f4fe71722666-logs\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.215808 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bx4zq\" (UniqueName: \"kubernetes.io/projected/45c8821a-baab-4e3c-8ffb-f4fe71722666-kube-api-access-bx4zq\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.215866 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-config-data\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.215959 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-public-tls-certs\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.218067 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45c8821a-baab-4e3c-8ffb-f4fe71722666-logs\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.236765 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-config-data-custom\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " 
pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.236924 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-combined-ca-bundle\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.237275 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-public-tls-certs\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.237620 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-internal-tls-certs\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.237938 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-config-data\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.241359 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bx4zq\" (UniqueName: \"kubernetes.io/projected/45c8821a-baab-4e3c-8ffb-f4fe71722666-kube-api-access-bx4zq\") pod \"barbican-api-68f67467f4-8bd8x\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:22 crc kubenswrapper[4954]: I1128 16:33:22.274555 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:23 crc kubenswrapper[4954]: I1128 16:33:23.147158 4954 generic.go:334] "Generic (PLEG): container finished" podID="2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e" containerID="f657b195bba8965fc3348f96e5b3055b8c89ded434fec2bb372b073bc85deee0" exitCode=0 Nov 28 16:33:23 crc kubenswrapper[4954]: I1128 16:33:23.147258 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-v7lkp" event={"ID":"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e","Type":"ContainerDied","Data":"f657b195bba8965fc3348f96e5b3055b8c89ded434fec2bb372b073bc85deee0"} Nov 28 16:33:24 crc kubenswrapper[4954]: I1128 16:33:24.963021 4954 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.070419 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-combined-ca-bundle\") pod \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") "
Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.070835 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7wpc9\" (UniqueName: \"kubernetes.io/projected/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-kube-api-access-7wpc9\") pod \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") "
Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.070881 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-scripts\") pod \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") "
Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.070915 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-config-data\") pod \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") "
Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.070959 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-db-sync-config-data\") pod \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") "
Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.071013 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-etc-machine-id\") pod \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\" (UID: \"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e\") "
Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.071435 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e" (UID: "2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.090575 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e" (UID: "2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.092556 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-kube-api-access-7wpc9" (OuterVolumeSpecName: "kube-api-access-7wpc9") pod "2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e" (UID: "2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e"). InnerVolumeSpecName "kube-api-access-7wpc9". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.097087 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-scripts" (OuterVolumeSpecName: "scripts") pod "2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e" (UID: "2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.106299 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e" (UID: "2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.159842 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-config-data" (OuterVolumeSpecName: "config-data") pod "2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e" (UID: "2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.172646 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.172678 4954 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.172691 4954 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.172702 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.172712 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7wpc9\" (UniqueName: \"kubernetes.io/projected/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-kube-api-access-7wpc9\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.172724 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.176849 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-v7lkp" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.176840 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-v7lkp" event={"ID":"2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e","Type":"ContainerDied","Data":"68cdbb71e9706db4a377c86beb7c71c5f22e9eb11e855e0014eb791f583c2f7a"} Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.177001 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68cdbb71e9706db4a377c86beb7c71c5f22e9eb11e855e0014eb791f583c2f7a" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.467813 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:33:25 crc kubenswrapper[4954]: E1128 16:33:25.468281 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e" containerName="cinder-db-sync" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.468295 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e" containerName="cinder-db-sync" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.468472 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e" containerName="cinder-db-sync" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.469660 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.476727 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.476967 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.477105 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.477212 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-474mt" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.478922 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.551654 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-gb5zf"] Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.551950 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf" podUID="d4f6d649-427f-4c3f-b541-67e3f529e218" containerName="dnsmasq-dns" containerID="cri-o://c0637cf4f8c41fde1febf14d841ba1af4f1baa489f4900f64fb3bbec798b9cb8" gracePeriod=10 Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.557433 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.582846 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65dba6ce-1c65-433c-90bd-c721bf7171f0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.582931 4954 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.582986 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-config-data\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.583039 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.585846 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nz5ht\" (UniqueName: \"kubernetes.io/projected/65dba6ce-1c65-433c-90bd-c721bf7171f0-kube-api-access-nz5ht\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.586074 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-scripts\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.629657 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-tjlhh"] Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.632125 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.642629 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-tjlhh"] Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.688551 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.688623 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcqmz\" (UniqueName: \"kubernetes.io/projected/baee5634-1827-46e1-884d-6ea76415535e-kube-api-access-gcqmz\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.688650 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.688673 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-config-data\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.688699 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.688751 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.688788 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nz5ht\" (UniqueName: \"kubernetes.io/projected/65dba6ce-1c65-433c-90bd-c721bf7171f0-kube-api-access-nz5ht\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.688822 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.688875 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-scripts\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.688898 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-config\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.688927 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-dns-svc\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.688958 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65dba6ce-1c65-433c-90bd-c721bf7171f0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.689051 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65dba6ce-1c65-433c-90bd-c721bf7171f0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.713336 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.714473 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nz5ht\" (UniqueName: \"kubernetes.io/projected/65dba6ce-1c65-433c-90bd-c721bf7171f0-kube-api-access-nz5ht\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.715458 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-scripts\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.715520 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-config-data\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.716040 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 
16:33:25.760701 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.762694 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.765652 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.790118 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.790222 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-config\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.790262 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-dns-svc\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.790343 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcqmz\" (UniqueName: \"kubernetes.io/projected/baee5634-1827-46e1-884d-6ea76415535e-kube-api-access-gcqmz\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.790382 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.790569 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.791221 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-dns-svc\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.791470 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: 
I1128 16:33:25.791470 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.792264 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.792481 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-config\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.794099 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.820916 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcqmz\" (UniqueName: \"kubernetes.io/projected/baee5634-1827-46e1-884d-6ea76415535e-kube-api-access-gcqmz\") pod \"dnsmasq-dns-6578955fd5-tjlhh\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.821944 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.893340 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-config-data-custom\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.893427 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-config-data\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.893467 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.893512 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-scripts\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.893888 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qw8rs\" (UniqueName: 
\"kubernetes.io/projected/2f22be80-2ca3-4206-923a-b5cbf91256fe-kube-api-access-qw8rs\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.893929 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2f22be80-2ca3-4206-923a-b5cbf91256fe-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.893976 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f22be80-2ca3-4206-923a-b5cbf91256fe-logs\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.967705 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.995626 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qw8rs\" (UniqueName: \"kubernetes.io/projected/2f22be80-2ca3-4206-923a-b5cbf91256fe-kube-api-access-qw8rs\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.995689 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2f22be80-2ca3-4206-923a-b5cbf91256fe-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.995713 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f22be80-2ca3-4206-923a-b5cbf91256fe-logs\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.995864 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-config-data-custom\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.995896 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-config-data\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.995904 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2f22be80-2ca3-4206-923a-b5cbf91256fe-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.995932 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-combined-ca-bundle\") pod \"cinder-api-0\" (UID: 
\"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.996215 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-scripts\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:25 crc kubenswrapper[4954]: I1128 16:33:25.997606 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f22be80-2ca3-4206-923a-b5cbf91256fe-logs\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.002239 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-scripts\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.003068 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-config-data-custom\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.007114 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.007509 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-config-data\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.026023 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qw8rs\" (UniqueName: \"kubernetes.io/projected/2f22be80-2ca3-4206-923a-b5cbf91256fe-kube-api-access-qw8rs\") pod \"cinder-api-0\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") " pod="openstack/cinder-api-0" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.084814 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.229667 4954 generic.go:334] "Generic (PLEG): container finished" podID="d4f6d649-427f-4c3f-b541-67e3f529e218" containerID="c0637cf4f8c41fde1febf14d841ba1af4f1baa489f4900f64fb3bbec798b9cb8" exitCode=0 Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.229887 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf" event={"ID":"d4f6d649-427f-4c3f-b541-67e3f529e218","Type":"ContainerDied","Data":"c0637cf4f8c41fde1febf14d841ba1af4f1baa489f4900f64fb3bbec798b9cb8"} Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.427196 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.529595 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-dns-swift-storage-0\") pod \"d4f6d649-427f-4c3f-b541-67e3f529e218\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.529647 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-config\") pod \"d4f6d649-427f-4c3f-b541-67e3f529e218\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.529681 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-ovsdbserver-nb\") pod \"d4f6d649-427f-4c3f-b541-67e3f529e218\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.529730 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-ovsdbserver-sb\") pod \"d4f6d649-427f-4c3f-b541-67e3f529e218\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.529833 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-dns-svc\") pod \"d4f6d649-427f-4c3f-b541-67e3f529e218\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.529858 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r79tz\" (UniqueName: \"kubernetes.io/projected/d4f6d649-427f-4c3f-b541-67e3f529e218-kube-api-access-r79tz\") pod \"d4f6d649-427f-4c3f-b541-67e3f529e218\" (UID: \"d4f6d649-427f-4c3f-b541-67e3f529e218\") " Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.546518 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4f6d649-427f-4c3f-b541-67e3f529e218-kube-api-access-r79tz" (OuterVolumeSpecName: "kube-api-access-r79tz") pod "d4f6d649-427f-4c3f-b541-67e3f529e218" (UID: "d4f6d649-427f-4c3f-b541-67e3f529e218"). InnerVolumeSpecName "kube-api-access-r79tz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.631848 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r79tz\" (UniqueName: \"kubernetes.io/projected/d4f6d649-427f-4c3f-b541-67e3f529e218-kube-api-access-r79tz\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.858993 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d4f6d649-427f-4c3f-b541-67e3f529e218" (UID: "d4f6d649-427f-4c3f-b541-67e3f529e218"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.877632 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-config" (OuterVolumeSpecName: "config") pod "d4f6d649-427f-4c3f-b541-67e3f529e218" (UID: "d4f6d649-427f-4c3f-b541-67e3f529e218"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.887114 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d4f6d649-427f-4c3f-b541-67e3f529e218" (UID: "d4f6d649-427f-4c3f-b541-67e3f529e218"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.898642 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d4f6d649-427f-4c3f-b541-67e3f529e218" (UID: "d4f6d649-427f-4c3f-b541-67e3f529e218"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.927310 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d4f6d649-427f-4c3f-b541-67e3f529e218" (UID: "d4f6d649-427f-4c3f-b541-67e3f529e218"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.938547 4954 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.938590 4954 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.938605 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.938617 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:26 crc kubenswrapper[4954]: I1128 16:33:26.938630 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d4f6d649-427f-4c3f-b541-67e3f529e218-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:27 crc kubenswrapper[4954]: I1128 16:33:27.164027 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-68f67467f4-8bd8x"] Nov 28 16:33:27 crc kubenswrapper[4954]: I1128 16:33:27.182972 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:33:27 crc kubenswrapper[4954]: I1128 16:33:27.242379 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-api-68f67467f4-8bd8x" event={"ID":"45c8821a-baab-4e3c-8ffb-f4fe71722666","Type":"ContainerStarted","Data":"96352828906f018ee67c1437994daad7406a1d074cf14422caf6c2dade72a758"} Nov 28 16:33:27 crc kubenswrapper[4954]: I1128 16:33:27.246389 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67c7f88868-224xn" event={"ID":"7ee4f765-e243-4661-b118-dbf3414cfb2e","Type":"ContainerStarted","Data":"03986f5ef00d106e9e4bf7e552bd6d38547e854ac711405aa68cf159ed4e9276"} Nov 28 16:33:27 crc kubenswrapper[4954]: I1128 16:33:27.256277 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2f22be80-2ca3-4206-923a-b5cbf91256fe","Type":"ContainerStarted","Data":"2475afee29794596e07cf90d4024e70937253e53b69e9dab42b72c79d6180721"} Nov 28 16:33:27 crc kubenswrapper[4954]: I1128 16:33:27.259276 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf" event={"ID":"d4f6d649-427f-4c3f-b541-67e3f529e218","Type":"ContainerDied","Data":"ee1c872899ea27cffac0775bc47db274c9376471381656dcc1b236bcdec4056b"} Nov 28 16:33:27 crc kubenswrapper[4954]: I1128 16:33:27.259333 4954 scope.go:117] "RemoveContainer" containerID="c0637cf4f8c41fde1febf14d841ba1af4f1baa489f4900f64fb3bbec798b9cb8" Nov 28 16:33:27 crc kubenswrapper[4954]: I1128 16:33:27.259337 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-gb5zf" Nov 28 16:33:27 crc kubenswrapper[4954]: I1128 16:33:27.308848 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-tjlhh"] Nov 28 16:33:27 crc kubenswrapper[4954]: I1128 16:33:27.316120 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-gb5zf"] Nov 28 16:33:27 crc kubenswrapper[4954]: I1128 16:33:27.329359 4954 scope.go:117] "RemoveContainer" containerID="7bf1e67c3967f633fafc7654998fcb77d65da7f515e548149bcc08b306834f9d" Nov 28 16:33:27 crc kubenswrapper[4954]: I1128 16:33:27.335749 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-gb5zf"] Nov 28 16:33:27 crc kubenswrapper[4954]: I1128 16:33:27.457569 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:33:27 crc kubenswrapper[4954]: I1128 16:33:27.971731 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4f6d649-427f-4c3f-b541-67e3f529e218" path="/var/lib/kubelet/pods/d4f6d649-427f-4c3f-b541-67e3f529e218/volumes" Nov 28 16:33:27 crc kubenswrapper[4954]: I1128 16:33:27.972752 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.222652 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.319363 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8d4de09-a431-4eae-9ac8-23a33e1039b4","Type":"ContainerStarted","Data":"e501f343de04de51054860b7bb52584574e45ed94f8a371b6fac0b2b0936a70d"} Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.319902 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="ceilometer-central-agent" containerID="cri-o://4d382ba528f41aee190edd15fc2b523c22449fd2cee1af2ba24ed905aa18370e" gracePeriod=30 Nov 
28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.320201 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.320546 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="proxy-httpd" containerID="cri-o://e501f343de04de51054860b7bb52584574e45ed94f8a371b6fac0b2b0936a70d" gracePeriod=30 Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.320606 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="sg-core" containerID="cri-o://d2eb138f847e509bd3995e2f89d03d76cfe5d75e47f668141de3240521fbd833" gracePeriod=30 Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.320652 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="ceilometer-notification-agent" containerID="cri-o://dd316bf065b5e906aadfff02f3ab7f5bc653417348b6d60e4628ece70811cf63" gracePeriod=30 Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.387298 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.961487969 podStartE2EDuration="55.387279527s" podCreationTimestamp="2025-11-28 16:32:33 +0000 UTC" firstStartedPulling="2025-11-28 16:32:34.97195118 +0000 UTC m=+1308.363619721" lastFinishedPulling="2025-11-28 16:33:26.397742738 +0000 UTC m=+1359.789411279" observedRunningTime="2025-11-28 16:33:28.362011328 +0000 UTC m=+1361.753679889" watchObservedRunningTime="2025-11-28 16:33:28.387279527 +0000 UTC m=+1361.778948068" Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.389714 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"65dba6ce-1c65-433c-90bd-c721bf7171f0","Type":"ContainerStarted","Data":"688ebab56c8e2f1077e6b3f13aa40be70b666df1794e9db7b60bb2cd20c43b7f"} Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.399739 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68f67467f4-8bd8x" event={"ID":"45c8821a-baab-4e3c-8ffb-f4fe71722666","Type":"ContainerStarted","Data":"2ef96eee105583bb4f5aff04b8ac6bdfa71044142eddd069aee2fd7df3cab725"} Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.399782 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68f67467f4-8bd8x" event={"ID":"45c8821a-baab-4e3c-8ffb-f4fe71722666","Type":"ContainerStarted","Data":"98b35779e547ce4c6fa5ddd4466450c48cf21079fb3271d4d7874c2a5254e844"} Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.400823 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.400850 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.424487 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bdd54468f-gzf48" event={"ID":"6ce804c9-edba-4404-9099-4c0f102aa1b2","Type":"ContainerStarted","Data":"42c4ec9ba6dc6f636e97ef6169d3c32ab858524488e20c3e4beb2047ba334405"} Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.424550 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-bdd54468f-gzf48" event={"ID":"6ce804c9-edba-4404-9099-4c0f102aa1b2","Type":"ContainerStarted","Data":"50b931559f7a77fb0607febad2cc3106710093b63140da41ba7de9a70984ad4b"} Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.424773 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.437892 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2f22be80-2ca3-4206-923a-b5cbf91256fe","Type":"ContainerStarted","Data":"320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555"} Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.441486 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" event={"ID":"70613221-3087-4dc3-9f41-86eb6fe88041","Type":"ContainerStarted","Data":"19d7c28fd7bc7d427f3a1148dfb20dae0ede2b943c17ee7da5ab666f31980fb2"} Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.444561 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" event={"ID":"70613221-3087-4dc3-9f41-86eb6fe88041","Type":"ContainerStarted","Data":"bd0ca4ad41624f1010f9badf4ad0d9925862fa25503f6b1962dabd2c1181aa11"} Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.447008 4954 generic.go:334] "Generic (PLEG): container finished" podID="baee5634-1827-46e1-884d-6ea76415535e" containerID="1c9e4a3c552ca4fe86935252803f477885b013536c1d4f2bc415fd6b706a9565" exitCode=0 Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.447068 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" event={"ID":"baee5634-1827-46e1-884d-6ea76415535e","Type":"ContainerDied","Data":"1c9e4a3c552ca4fe86935252803f477885b013536c1d4f2bc415fd6b706a9565"} Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.447093 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" event={"ID":"baee5634-1827-46e1-884d-6ea76415535e","Type":"ContainerStarted","Data":"c74324e080b76734eee4fbebae3d55db44b2fe00f27416484c0e24e0f4a5b584"} Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.453323 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-786547d6db-f76dj" Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.458983 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-68f67467f4-8bd8x" podStartSLOduration=7.458965851 podStartE2EDuration="7.458965851s" podCreationTimestamp="2025-11-28 16:33:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:28.438288798 +0000 UTC m=+1361.829957349" watchObservedRunningTime="2025-11-28 16:33:28.458965851 +0000 UTC m=+1361.850634392" Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.475268 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5b74b79fdc-99274" event={"ID":"9023ad7d-6621-4ed8-aec4-bd1d0db53088","Type":"ContainerStarted","Data":"ad74a48962c94f92742b21cb97317608d199491c209ca0f9aba391fcde7835b3"} Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.475322 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5b74b79fdc-99274" 
event={"ID":"9023ad7d-6621-4ed8-aec4-bd1d0db53088","Type":"ContainerStarted","Data":"d51ea12ac29bf1c9df355eb528d2411b3c64385297f631ec4383475fa281dbec"} Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.478685 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.493288 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-bdd54468f-gzf48" podStartSLOduration=9.493265975 podStartE2EDuration="9.493265975s" podCreationTimestamp="2025-11-28 16:33:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:28.473078467 +0000 UTC m=+1361.864747008" watchObservedRunningTime="2025-11-28 16:33:28.493265975 +0000 UTC m=+1361.884934516" Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.584027 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" podStartSLOduration=3.781619589 podStartE2EDuration="13.584006592s" podCreationTimestamp="2025-11-28 16:33:15 +0000 UTC" firstStartedPulling="2025-11-28 16:33:16.370062397 +0000 UTC m=+1349.761730928" lastFinishedPulling="2025-11-28 16:33:26.17244939 +0000 UTC m=+1359.564117931" observedRunningTime="2025-11-28 16:33:28.542664155 +0000 UTC m=+1361.934332686" watchObservedRunningTime="2025-11-28 16:33:28.584006592 +0000 UTC m=+1361.975675133" Nov 28 16:33:28 crc kubenswrapper[4954]: I1128 16:33:28.633399 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-5b74b79fdc-99274" podStartSLOduration=3.50790491 podStartE2EDuration="13.633379542s" podCreationTimestamp="2025-11-28 16:33:15 +0000 UTC" firstStartedPulling="2025-11-28 16:33:16.104490986 +0000 UTC m=+1349.496159527" lastFinishedPulling="2025-11-28 16:33:26.229965618 +0000 UTC m=+1359.621634159" observedRunningTime="2025-11-28 16:33:28.602494366 +0000 UTC m=+1361.994162897" watchObservedRunningTime="2025-11-28 16:33:28.633379542 +0000 UTC m=+1362.025048083" Nov 28 16:33:29 crc kubenswrapper[4954]: I1128 16:33:29.518586 4954 generic.go:334] "Generic (PLEG): container finished" podID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerID="e501f343de04de51054860b7bb52584574e45ed94f8a371b6fac0b2b0936a70d" exitCode=0 Nov 28 16:33:29 crc kubenswrapper[4954]: I1128 16:33:29.518860 4954 generic.go:334] "Generic (PLEG): container finished" podID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerID="d2eb138f847e509bd3995e2f89d03d76cfe5d75e47f668141de3240521fbd833" exitCode=2 Nov 28 16:33:29 crc kubenswrapper[4954]: I1128 16:33:29.518974 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8d4de09-a431-4eae-9ac8-23a33e1039b4","Type":"ContainerDied","Data":"e501f343de04de51054860b7bb52584574e45ed94f8a371b6fac0b2b0936a70d"} Nov 28 16:33:29 crc kubenswrapper[4954]: I1128 16:33:29.519009 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8d4de09-a431-4eae-9ac8-23a33e1039b4","Type":"ContainerDied","Data":"d2eb138f847e509bd3995e2f89d03d76cfe5d75e47f668141de3240521fbd833"} Nov 28 16:33:30 crc kubenswrapper[4954]: I1128 16:33:30.532350 4954 generic.go:334] "Generic (PLEG): container finished" podID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerID="4d382ba528f41aee190edd15fc2b523c22449fd2cee1af2ba24ed905aa18370e" exitCode=0 Nov 28 16:33:30 crc 
kubenswrapper[4954]: I1128 16:33:30.532426 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8d4de09-a431-4eae-9ac8-23a33e1039b4","Type":"ContainerDied","Data":"4d382ba528f41aee190edd15fc2b523c22449fd2cee1af2ba24ed905aa18370e"}
Nov 28 16:33:32 crc kubenswrapper[4954]: I1128 16:33:32.480229 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:33:32 crc kubenswrapper[4954]: I1128 16:33:32.480578 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:33:32 crc kubenswrapper[4954]: I1128 16:33:32.553073 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" event={"ID":"baee5634-1827-46e1-884d-6ea76415535e","Type":"ContainerStarted","Data":"5798abdd42e2387bd7b208030510f19c5d0e913d5d27b7c3d3569033adf96da3"}
Nov 28 16:33:32 crc kubenswrapper[4954]: I1128 16:33:32.553554 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6578955fd5-tjlhh"
Nov 28 16:33:32 crc kubenswrapper[4954]: I1128 16:33:32.557178 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2f22be80-2ca3-4206-923a-b5cbf91256fe","Type":"ContainerStarted","Data":"0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35"}
Nov 28 16:33:32 crc kubenswrapper[4954]: I1128 16:33:32.557402 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 28 16:33:32 crc kubenswrapper[4954]: I1128 16:33:32.557407 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="2f22be80-2ca3-4206-923a-b5cbf91256fe" containerName="cinder-api" containerID="cri-o://0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35" gracePeriod=30
Nov 28 16:33:32 crc kubenswrapper[4954]: I1128 16:33:32.557395 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="2f22be80-2ca3-4206-923a-b5cbf91256fe" containerName="cinder-api-log" containerID="cri-o://320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555" gracePeriod=30
Nov 28 16:33:32 crc kubenswrapper[4954]: I1128 16:33:32.573114 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" podStartSLOduration=7.573095936 podStartE2EDuration="7.573095936s" podCreationTimestamp="2025-11-28 16:33:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:32.572736425 +0000 UTC m=+1365.964404976" watchObservedRunningTime="2025-11-28 16:33:32.573095936 +0000 UTC m=+1365.964764487"
Nov 28 16:33:32 crc kubenswrapper[4954]: I1128 16:33:32.574216 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-67c7f88868-224xn" podStartSLOduration=16.574207891 podStartE2EDuration="16.574207891s" podCreationTimestamp="2025-11-28 16:33:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:28.648604334 +0000 UTC m=+1362.040272875" watchObservedRunningTime="2025-11-28 16:33:32.574207891 +0000 UTC m=+1365.965876442"
Nov 28 16:33:32 crc kubenswrapper[4954]: I1128 16:33:32.598385 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=7.598363214 podStartE2EDuration="7.598363214s" podCreationTimestamp="2025-11-28 16:33:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:32.593996256 +0000 UTC m=+1365.985664807" watchObservedRunningTime="2025-11-28 16:33:32.598363214 +0000 UTC m=+1365.990031765"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.333195 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.403222 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8d4de09-a431-4eae-9ac8-23a33e1039b4-run-httpd\") pod \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") "
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.403638 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-sg-core-conf-yaml\") pod \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") "
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.403712 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-combined-ca-bundle\") pod \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") "
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.403811 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjbx2\" (UniqueName: \"kubernetes.io/projected/b8d4de09-a431-4eae-9ac8-23a33e1039b4-kube-api-access-zjbx2\") pod \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") "
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.403865 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-scripts\") pod \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") "
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.403900 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-config-data\") pod \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") "
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.403963 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8d4de09-a431-4eae-9ac8-23a33e1039b4-log-httpd\") pod \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\" (UID: \"b8d4de09-a431-4eae-9ac8-23a33e1039b4\") "
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.405048 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8d4de09-a431-4eae-9ac8-23a33e1039b4-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b8d4de09-a431-4eae-9ac8-23a33e1039b4" (UID: "b8d4de09-a431-4eae-9ac8-23a33e1039b4"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.409036 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8d4de09-a431-4eae-9ac8-23a33e1039b4-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b8d4de09-a431-4eae-9ac8-23a33e1039b4" (UID: "b8d4de09-a431-4eae-9ac8-23a33e1039b4"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.417579 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8d4de09-a431-4eae-9ac8-23a33e1039b4-kube-api-access-zjbx2" (OuterVolumeSpecName: "kube-api-access-zjbx2") pod "b8d4de09-a431-4eae-9ac8-23a33e1039b4" (UID: "b8d4de09-a431-4eae-9ac8-23a33e1039b4"). InnerVolumeSpecName "kube-api-access-zjbx2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.426758 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-scripts" (OuterVolumeSpecName: "scripts") pod "b8d4de09-a431-4eae-9ac8-23a33e1039b4" (UID: "b8d4de09-a431-4eae-9ac8-23a33e1039b4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.490054 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b8d4de09-a431-4eae-9ac8-23a33e1039b4" (UID: "b8d4de09-a431-4eae-9ac8-23a33e1039b4"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.506984 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.507712 4954 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8d4de09-a431-4eae-9ac8-23a33e1039b4-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.507750 4954 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.507761 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjbx2\" (UniqueName: \"kubernetes.io/projected/b8d4de09-a431-4eae-9ac8-23a33e1039b4-kube-api-access-zjbx2\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.507770 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.507778 4954 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b8d4de09-a431-4eae-9ac8-23a33e1039b4-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.516499 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b8d4de09-a431-4eae-9ac8-23a33e1039b4" (UID: "b8d4de09-a431-4eae-9ac8-23a33e1039b4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.599901 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-config-data" (OuterVolumeSpecName: "config-data") pod "b8d4de09-a431-4eae-9ac8-23a33e1039b4" (UID: "b8d4de09-a431-4eae-9ac8-23a33e1039b4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.601287 4954 generic.go:334] "Generic (PLEG): container finished" podID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerID="dd316bf065b5e906aadfff02f3ab7f5bc653417348b6d60e4628ece70811cf63" exitCode=0
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.601412 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.602428 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8d4de09-a431-4eae-9ac8-23a33e1039b4","Type":"ContainerDied","Data":"dd316bf065b5e906aadfff02f3ab7f5bc653417348b6d60e4628ece70811cf63"}
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.602510 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b8d4de09-a431-4eae-9ac8-23a33e1039b4","Type":"ContainerDied","Data":"6e5406c54fffe7c2c114e96589ca2f0567bfafe41bcd2f68331983059117b6da"}
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.602533 4954 scope.go:117] "RemoveContainer" containerID="e501f343de04de51054860b7bb52584574e45ed94f8a371b6fac0b2b0936a70d"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.608908 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2f22be80-2ca3-4206-923a-b5cbf91256fe-etc-machine-id\") pod \"2f22be80-2ca3-4206-923a-b5cbf91256fe\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") "
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.608965 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-scripts\") pod \"2f22be80-2ca3-4206-923a-b5cbf91256fe\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") "
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.609019 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-combined-ca-bundle\") pod \"2f22be80-2ca3-4206-923a-b5cbf91256fe\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") "
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.609058 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qw8rs\" (UniqueName: \"kubernetes.io/projected/2f22be80-2ca3-4206-923a-b5cbf91256fe-kube-api-access-qw8rs\") pod \"2f22be80-2ca3-4206-923a-b5cbf91256fe\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") "
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.609140 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-config-data\") pod \"2f22be80-2ca3-4206-923a-b5cbf91256fe\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") "
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.609216 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-config-data-custom\") pod \"2f22be80-2ca3-4206-923a-b5cbf91256fe\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") "
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.609274 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f22be80-2ca3-4206-923a-b5cbf91256fe-logs\") pod \"2f22be80-2ca3-4206-923a-b5cbf91256fe\" (UID: \"2f22be80-2ca3-4206-923a-b5cbf91256fe\") "
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.609675 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.609695 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8d4de09-a431-4eae-9ac8-23a33e1039b4-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.610569 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f22be80-2ca3-4206-923a-b5cbf91256fe-logs" (OuterVolumeSpecName: "logs") pod "2f22be80-2ca3-4206-923a-b5cbf91256fe" (UID: "2f22be80-2ca3-4206-923a-b5cbf91256fe"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.610636 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2f22be80-2ca3-4206-923a-b5cbf91256fe-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "2f22be80-2ca3-4206-923a-b5cbf91256fe" (UID: "2f22be80-2ca3-4206-923a-b5cbf91256fe"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.615408 4954 generic.go:334] "Generic (PLEG): container finished" podID="2f22be80-2ca3-4206-923a-b5cbf91256fe" containerID="0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35" exitCode=0
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.615449 4954 generic.go:334] "Generic (PLEG): container finished" podID="2f22be80-2ca3-4206-923a-b5cbf91256fe" containerID="320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555" exitCode=143
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.615975 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2f22be80-2ca3-4206-923a-b5cbf91256fe","Type":"ContainerDied","Data":"0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35"}
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.616038 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2f22be80-2ca3-4206-923a-b5cbf91256fe","Type":"ContainerDied","Data":"320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555"}
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.616048 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2f22be80-2ca3-4206-923a-b5cbf91256fe","Type":"ContainerDied","Data":"2475afee29794596e07cf90d4024e70937253e53b69e9dab42b72c79d6180721"}
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.616114 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.618676 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f22be80-2ca3-4206-923a-b5cbf91256fe-kube-api-access-qw8rs" (OuterVolumeSpecName: "kube-api-access-qw8rs") pod "2f22be80-2ca3-4206-923a-b5cbf91256fe" (UID: "2f22be80-2ca3-4206-923a-b5cbf91256fe"). InnerVolumeSpecName "kube-api-access-qw8rs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.619672 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-scripts" (OuterVolumeSpecName: "scripts") pod "2f22be80-2ca3-4206-923a-b5cbf91256fe" (UID: "2f22be80-2ca3-4206-923a-b5cbf91256fe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.620600 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2f22be80-2ca3-4206-923a-b5cbf91256fe" (UID: "2f22be80-2ca3-4206-923a-b5cbf91256fe"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.641583 4954 scope.go:117] "RemoveContainer" containerID="d2eb138f847e509bd3995e2f89d03d76cfe5d75e47f668141de3240521fbd833"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.649335 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2f22be80-2ca3-4206-923a-b5cbf91256fe" (UID: "2f22be80-2ca3-4206-923a-b5cbf91256fe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.659713 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.675859 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.684873 4954 scope.go:117] "RemoveContainer" containerID="dd316bf065b5e906aadfff02f3ab7f5bc653417348b6d60e4628ece70811cf63"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.685254 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 28 16:33:33 crc kubenswrapper[4954]: E1128 16:33:33.685723 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="ceilometer-notification-agent"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.685744 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="ceilometer-notification-agent"
Nov 28 16:33:33 crc kubenswrapper[4954]: E1128 16:33:33.685765 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f22be80-2ca3-4206-923a-b5cbf91256fe" containerName="cinder-api"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.685772 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f22be80-2ca3-4206-923a-b5cbf91256fe" containerName="cinder-api"
Nov 28 16:33:33 crc kubenswrapper[4954]: E1128 16:33:33.685784 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4f6d649-427f-4c3f-b541-67e3f529e218" containerName="init"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.685791 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4f6d649-427f-4c3f-b541-67e3f529e218" containerName="init"
Nov 28 16:33:33 crc kubenswrapper[4954]: E1128 16:33:33.685808 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f22be80-2ca3-4206-923a-b5cbf91256fe" containerName="cinder-api-log"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.685816 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f22be80-2ca3-4206-923a-b5cbf91256fe" containerName="cinder-api-log"
Nov 28 16:33:33 crc kubenswrapper[4954]: E1128 16:33:33.685830 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="proxy-httpd"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.685843 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="proxy-httpd"
Nov 28 16:33:33 crc kubenswrapper[4954]: E1128 16:33:33.685857 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="ceilometer-central-agent"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.685865 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="ceilometer-central-agent"
Nov 28 16:33:33 crc kubenswrapper[4954]: E1128 16:33:33.685884 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="sg-core"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.685892 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="sg-core"
Nov 28 16:33:33 crc kubenswrapper[4954]: E1128 16:33:33.685904 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4f6d649-427f-4c3f-b541-67e3f529e218" containerName="dnsmasq-dns"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.685911 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4f6d649-427f-4c3f-b541-67e3f529e218" containerName="dnsmasq-dns"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.686109 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="proxy-httpd"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.686126 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f22be80-2ca3-4206-923a-b5cbf91256fe" containerName="cinder-api-log"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.686142 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4f6d649-427f-4c3f-b541-67e3f529e218" containerName="dnsmasq-dns"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.686159 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="ceilometer-notification-agent"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.686178 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f22be80-2ca3-4206-923a-b5cbf91256fe" containerName="cinder-api"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.686193 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="sg-core"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.686214 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" containerName="ceilometer-central-agent"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.688698 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.691598 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.692973 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.711510 4954 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.711568 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f22be80-2ca3-4206-923a-b5cbf91256fe-logs\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.711581 4954 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2f22be80-2ca3-4206-923a-b5cbf91256fe-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.711592 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.711604 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.711615 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qw8rs\" (UniqueName: \"kubernetes.io/projected/2f22be80-2ca3-4206-923a-b5cbf91256fe-kube-api-access-qw8rs\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.712738 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.713227 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-config-data" (OuterVolumeSpecName: "config-data") pod "2f22be80-2ca3-4206-923a-b5cbf91256fe" (UID: "2f22be80-2ca3-4206-923a-b5cbf91256fe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.732251 4954 scope.go:117] "RemoveContainer" containerID="4d382ba528f41aee190edd15fc2b523c22449fd2cee1af2ba24ed905aa18370e"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.765496 4954 scope.go:117] "RemoveContainer" containerID="e501f343de04de51054860b7bb52584574e45ed94f8a371b6fac0b2b0936a70d"
Nov 28 16:33:33 crc kubenswrapper[4954]: E1128 16:33:33.766174 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e501f343de04de51054860b7bb52584574e45ed94f8a371b6fac0b2b0936a70d\": container with ID starting with e501f343de04de51054860b7bb52584574e45ed94f8a371b6fac0b2b0936a70d not found: ID does not exist" containerID="e501f343de04de51054860b7bb52584574e45ed94f8a371b6fac0b2b0936a70d"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.766204 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e501f343de04de51054860b7bb52584574e45ed94f8a371b6fac0b2b0936a70d"} err="failed to get container status \"e501f343de04de51054860b7bb52584574e45ed94f8a371b6fac0b2b0936a70d\": rpc error: code = NotFound desc = could not find container \"e501f343de04de51054860b7bb52584574e45ed94f8a371b6fac0b2b0936a70d\": container with ID starting with e501f343de04de51054860b7bb52584574e45ed94f8a371b6fac0b2b0936a70d not found: ID does not exist"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.766223 4954 scope.go:117] "RemoveContainer" containerID="d2eb138f847e509bd3995e2f89d03d76cfe5d75e47f668141de3240521fbd833"
Nov 28 16:33:33 crc kubenswrapper[4954]: E1128 16:33:33.766566 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2eb138f847e509bd3995e2f89d03d76cfe5d75e47f668141de3240521fbd833\": container with ID starting with d2eb138f847e509bd3995e2f89d03d76cfe5d75e47f668141de3240521fbd833 not found: ID does not exist" containerID="d2eb138f847e509bd3995e2f89d03d76cfe5d75e47f668141de3240521fbd833"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.766707 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2eb138f847e509bd3995e2f89d03d76cfe5d75e47f668141de3240521fbd833"} err="failed to get container status \"d2eb138f847e509bd3995e2f89d03d76cfe5d75e47f668141de3240521fbd833\": rpc error: code = NotFound desc = could not find container \"d2eb138f847e509bd3995e2f89d03d76cfe5d75e47f668141de3240521fbd833\": container with ID starting with d2eb138f847e509bd3995e2f89d03d76cfe5d75e47f668141de3240521fbd833 not found: ID does not exist"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.766834 4954 scope.go:117] "RemoveContainer" containerID="dd316bf065b5e906aadfff02f3ab7f5bc653417348b6d60e4628ece70811cf63"
Nov 28 16:33:33 crc kubenswrapper[4954]: E1128 16:33:33.767894 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd316bf065b5e906aadfff02f3ab7f5bc653417348b6d60e4628ece70811cf63\": container with ID starting with dd316bf065b5e906aadfff02f3ab7f5bc653417348b6d60e4628ece70811cf63 not found: ID does not exist" containerID="dd316bf065b5e906aadfff02f3ab7f5bc653417348b6d60e4628ece70811cf63"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.767921 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd316bf065b5e906aadfff02f3ab7f5bc653417348b6d60e4628ece70811cf63"} err="failed to get container status \"dd316bf065b5e906aadfff02f3ab7f5bc653417348b6d60e4628ece70811cf63\": rpc error: code = NotFound desc = could not find container \"dd316bf065b5e906aadfff02f3ab7f5bc653417348b6d60e4628ece70811cf63\": container with ID starting with dd316bf065b5e906aadfff02f3ab7f5bc653417348b6d60e4628ece70811cf63 not found: ID does not exist"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.767939 4954 scope.go:117] "RemoveContainer" containerID="4d382ba528f41aee190edd15fc2b523c22449fd2cee1af2ba24ed905aa18370e"
Nov 28 16:33:33 crc kubenswrapper[4954]: E1128 16:33:33.768206 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d382ba528f41aee190edd15fc2b523c22449fd2cee1af2ba24ed905aa18370e\": container with ID starting with 4d382ba528f41aee190edd15fc2b523c22449fd2cee1af2ba24ed905aa18370e not found: ID does not exist" containerID="4d382ba528f41aee190edd15fc2b523c22449fd2cee1af2ba24ed905aa18370e"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.768302 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d382ba528f41aee190edd15fc2b523c22449fd2cee1af2ba24ed905aa18370e"} err="failed to get container status \"4d382ba528f41aee190edd15fc2b523c22449fd2cee1af2ba24ed905aa18370e\": rpc error: code = NotFound desc = could not find container \"4d382ba528f41aee190edd15fc2b523c22449fd2cee1af2ba24ed905aa18370e\": container with ID starting with 4d382ba528f41aee190edd15fc2b523c22449fd2cee1af2ba24ed905aa18370e not found: ID does not exist"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.768424 4954 scope.go:117] "RemoveContainer" containerID="0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.790774 4954 scope.go:117] "RemoveContainer" containerID="320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.813019 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.813061 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6361e8de-58e2-48d9-a6c7-53466e37e3a5-run-httpd\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.813318 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6361e8de-58e2-48d9-a6c7-53466e37e3a5-log-httpd\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.813448 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2snd\" (UniqueName: \"kubernetes.io/projected/6361e8de-58e2-48d9-a6c7-53466e37e3a5-kube-api-access-r2snd\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.813567 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-scripts\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.813717 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.813862 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-config-data\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.814031 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f22be80-2ca3-4206-923a-b5cbf91256fe-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.828662 4954 scope.go:117] "RemoveContainer" containerID="0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35"
Nov 28 16:33:33 crc kubenswrapper[4954]: E1128 16:33:33.829099 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35\": container with ID starting with 0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35 not found: ID does not exist" containerID="0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.829219 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35"} err="failed to get container status \"0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35\": rpc error: code = NotFound desc = could not find container \"0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35\": container with ID starting with 0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35 not found: ID does not exist"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.829301 4954 scope.go:117] "RemoveContainer" containerID="320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555"
Nov 28 16:33:33 crc kubenswrapper[4954]: E1128 16:33:33.829915 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555\": container with ID starting with 320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555 not found: ID does not exist" containerID="320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.829997 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555"} err="failed to get container status \"320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555\": rpc error: code = NotFound desc = could not find container \"320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555\": container with ID starting with 320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555 not found: ID does not exist"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.830076 4954 scope.go:117] "RemoveContainer" containerID="0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.830439 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35"} err="failed to get container status \"0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35\": rpc error: code = NotFound desc = could not find container \"0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35\": container with ID starting with 0e99d3eddc6537865b835bb6b27d1b004dadd2058f4bd560a080b247c02bcf35 not found: ID does not exist"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.830519 4954 scope.go:117] "RemoveContainer" containerID="320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.830841 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555"} err="failed to get container status \"320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555\": rpc error: code = NotFound desc = could not find container \"320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555\": container with ID starting with 320a0393e70de82a6b1cdb53991c1a4967968dc67f6912f6275149e56bd83555 not found: ID does not exist"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.879107 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8d4de09-a431-4eae-9ac8-23a33e1039b4" path="/var/lib/kubelet/pods/b8d4de09-a431-4eae-9ac8-23a33e1039b4/volumes"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.915566 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2snd\" (UniqueName: \"kubernetes.io/projected/6361e8de-58e2-48d9-a6c7-53466e37e3a5-kube-api-access-r2snd\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.915790 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-scripts\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.915857 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.915975 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-config-data\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.916100 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.916137 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6361e8de-58e2-48d9-a6c7-53466e37e3a5-run-httpd\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.916292 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6361e8de-58e2-48d9-a6c7-53466e37e3a5-log-httpd\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.916798 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6361e8de-58e2-48d9-a6c7-53466e37e3a5-log-httpd\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.916853 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6361e8de-58e2-48d9-a6c7-53466e37e3a5-run-httpd\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.921254 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.921652 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.921858 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-scripts\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.931182 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-config-data\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:33 crc kubenswrapper[4954]: I1128 16:33:33.933045 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2snd\" (UniqueName: \"kubernetes.io/projected/6361e8de-58e2-48d9-a6c7-53466e37e3a5-kube-api-access-r2snd\") pod \"ceilometer-0\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " pod="openstack/ceilometer-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.022186 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.065217 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-68f67467f4-8bd8x"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.073092 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.082041 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.108433 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.110287 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.124748 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.125007 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.147321 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.164408 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.222100 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.222143 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-config-data\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.222172 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.222191 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8qrx\" (UniqueName: \"kubernetes.io/projected/7d8b87c9-4dd0-431c-a555-49141762763a-kube-api-access-k8qrx\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.222219 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-config-data-custom\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.222239 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-scripts\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.222267 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7d8b87c9-4dd0-431c-a555-49141762763a-logs\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.222283 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7d8b87c9-4dd0-431c-a555-49141762763a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.222310 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.323937 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.324263 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8qrx\" (UniqueName: \"kubernetes.io/projected/7d8b87c9-4dd0-431c-a555-49141762763a-kube-api-access-k8qrx\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.324317 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-config-data-custom\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.324340 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-scripts\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.324369 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7d8b87c9-4dd0-431c-a555-49141762763a-logs\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.324401 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7d8b87c9-4dd0-431c-a555-49141762763a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.324433 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.324548 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.324576 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-config-data\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.324985 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7d8b87c9-4dd0-431c-a555-49141762763a-logs\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.325034 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7d8b87c9-4dd0-431c-a555-49141762763a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.336176 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-scripts\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.336279 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.336289 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-config-data-custom\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.336469 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.336684 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.336890 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-config-data\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.351660 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8qrx\" (UniqueName: \"kubernetes.io/projected/7d8b87c9-4dd0-431c-a555-49141762763a-kube-api-access-k8qrx\") pod \"cinder-api-0\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.524518 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.628791 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"65dba6ce-1c65-433c-90bd-c721bf7171f0","Type":"ContainerStarted","Data":"fbdd5cf5a10a1349ac9a1ff09bcdfc6d3741cad9ad9ba329c75c15745d5961f8"}
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.629135 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"65dba6ce-1c65-433c-90bd-c721bf7171f0","Type":"ContainerStarted","Data":"fcfb95b7f3e18870b78060330e2a591dd77bda72b104e157af8204dcc5f66950"}
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.668175 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.216104603 podStartE2EDuration="9.668153928s" podCreationTimestamp="2025-11-28 16:33:25 +0000 UTC" firstStartedPulling="2025-11-28 16:33:27.572142433 +0000 UTC m=+1360.963810974" lastFinishedPulling="2025-11-28 16:33:33.024191758 +0000 UTC m=+1366.415860299" observedRunningTime="2025-11-28 16:33:34.655396515 +0000 UTC m=+1368.047065066" watchObservedRunningTime="2025-11-28 16:33:34.668153928 +0000 UTC m=+1368.059822469"
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.686481 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 16:33:34 crc kubenswrapper[4954]: W1128 16:33:34.696206 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6361e8de_58e2_48d9_a6c7_53466e37e3a5.slice/crio-e83903f487a2cb8f86d554d66b1c110d7832e2f12dad148451c96d5d8c25b44c WatchSource:0}: Error finding container e83903f487a2cb8f86d554d66b1c110d7832e2f12dad148451c96d5d8c25b44c: Status 404 returned error can't find the container with id e83903f487a2cb8f86d554d66b1c110d7832e2f12dad148451c96d5d8c25b44c
Nov 28 16:33:34 crc kubenswrapper[4954]: I1128 16:33:34.977563 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:33:34 crc kubenswrapper[4954]: W1128 16:33:34.986260 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d8b87c9_4dd0_431c_a555_49141762763a.slice/crio-fcac9bad37dd256d23d145390f15de9e1ee2ff1d201714d9b00e4c6f7ebaca72 WatchSource:0}: Error finding container fcac9bad37dd256d23d145390f15de9e1ee2ff1d201714d9b00e4c6f7ebaca72: Status 404 returned error can't find the container with id fcac9bad37dd256d23d145390f15de9e1ee2ff1d201714d9b00e4c6f7ebaca72
Nov 28 16:33:35 crc kubenswrapper[4954]: I1128 16:33:35.658885 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7d8b87c9-4dd0-431c-a555-49141762763a","Type":"ContainerStarted","Data":"8add70c010322f7f578f7c468da2668563f210dfc294ebf9f41c56210d6165fb"}
Nov 28 16:33:35 crc kubenswrapper[4954]: I1128 16:33:35.659451 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7d8b87c9-4dd0-431c-a555-49141762763a","Type":"ContainerStarted","Data":"fcac9bad37dd256d23d145390f15de9e1ee2ff1d201714d9b00e4c6f7ebaca72"}
Nov 28 16:33:35 crc kubenswrapper[4954]: I1128 16:33:35.661725 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6361e8de-58e2-48d9-a6c7-53466e37e3a5","Type":"ContainerStarted","Data":"6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186"}
Nov 28 16:33:35 crc kubenswrapper[4954]: I1128 16:33:35.661761 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6361e8de-58e2-48d9-a6c7-53466e37e3a5","Type":"ContainerStarted","Data":"e83903f487a2cb8f86d554d66b1c110d7832e2f12dad148451c96d5d8c25b44c"}
Nov 28 16:33:35 crc kubenswrapper[4954]: I1128 16:33:35.812928 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-68f67467f4-8bd8x"
Nov 28 16:33:35 crc kubenswrapper[4954]: I1128 16:33:35.823013 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 28 16:33:35 crc kubenswrapper[4954]: I1128 16:33:35.915966 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f22be80-2ca3-4206-923a-b5cbf91256fe" path="/var/lib/kubelet/pods/2f22be80-2ca3-4206-923a-b5cbf91256fe/volumes"
Nov 28 16:33:35 crc kubenswrapper[4954]: I1128 16:33:35.947256 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-786547d6db-f76dj"]
Nov 28 16:33:35 crc kubenswrapper[4954]: I1128 16:33:35.948143 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-786547d6db-f76dj" podUID="6af9722b-be10-4c3e-9c73-96b114678d9d" containerName="barbican-api-log" containerID="cri-o://8aef752383c486bd8b446382fa11b0788bce12382196e6716a9ed891dd59abcb" gracePeriod=30
Nov 28 16:33:35 crc kubenswrapper[4954]: I1128 16:33:35.948418 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-786547d6db-f76dj" podUID="6af9722b-be10-4c3e-9c73-96b114678d9d" containerName="barbican-api" containerID="cri-o://c3793f97728a1daa5adad8b0318266a145058dfb78f7af045a770a6b937dc049" gracePeriod=30
Nov 28 16:33:36 crc kubenswrapper[4954]: I1128 16:33:36.696978 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6361e8de-58e2-48d9-a6c7-53466e37e3a5","Type":"ContainerStarted","Data":"e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c"}
Nov 28 16:33:36 crc kubenswrapper[4954]: I1128 16:33:36.699124 4954 generic.go:334] "Generic (PLEG): container finished" podID="6af9722b-be10-4c3e-9c73-96b114678d9d" containerID="8aef752383c486bd8b446382fa11b0788bce12382196e6716a9ed891dd59abcb" exitCode=143
Nov 28 16:33:36 crc kubenswrapper[4954]: I1128 16:33:36.699745 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-786547d6db-f76dj" event={"ID":"6af9722b-be10-4c3e-9c73-96b114678d9d","Type":"ContainerDied","Data":"8aef752383c486bd8b446382fa11b0788bce12382196e6716a9ed891dd59abcb"}
Nov 28 16:33:37 crc kubenswrapper[4954]: I1128 16:33:37.775170 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7d8b87c9-4dd0-431c-a555-49141762763a","Type":"ContainerStarted","Data":"786c11653ae6fc057189678b362be2013431f0eae8133efcb4e6222525e8f2c6"}
Nov 28 16:33:37 crc kubenswrapper[4954]: I1128 16:33:37.776590 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 28 16:33:37 crc kubenswrapper[4954]: I1128 16:33:37.800360 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.800339589 podStartE2EDuration="3.800339589s" podCreationTimestamp="2025-11-28 16:33:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:37.793094919 +0000 UTC m=+1371.184763460" watchObservedRunningTime="2025-11-28 16:33:37.800339589 +0000 UTC m=+1371.192008130"
Nov 28 16:33:38 crc kubenswrapper[4954]: I1128 16:33:38.792025 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6361e8de-58e2-48d9-a6c7-53466e37e3a5","Type":"ContainerStarted","Data":"7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb"}
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.567714 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-786547d6db-f76dj"
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.689774 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-combined-ca-bundle\") pod \"6af9722b-be10-4c3e-9c73-96b114678d9d\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") "
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.690438 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6af9722b-be10-4c3e-9c73-96b114678d9d-logs\") pod \"6af9722b-be10-4c3e-9c73-96b114678d9d\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") "
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.690890 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6af9722b-be10-4c3e-9c73-96b114678d9d-logs" (OuterVolumeSpecName: "logs") pod "6af9722b-be10-4c3e-9c73-96b114678d9d" (UID: "6af9722b-be10-4c3e-9c73-96b114678d9d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.691163 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-config-data\") pod \"6af9722b-be10-4c3e-9c73-96b114678d9d\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") "
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.691250 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-config-data-custom\") pod \"6af9722b-be10-4c3e-9c73-96b114678d9d\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") "
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.691336 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxfkk\" (UniqueName: \"kubernetes.io/projected/6af9722b-be10-4c3e-9c73-96b114678d9d-kube-api-access-vxfkk\") pod \"6af9722b-be10-4c3e-9c73-96b114678d9d\" (UID: \"6af9722b-be10-4c3e-9c73-96b114678d9d\") "
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.694338 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6af9722b-be10-4c3e-9c73-96b114678d9d-logs\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.701088 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6af9722b-be10-4c3e-9c73-96b114678d9d-kube-api-access-vxfkk" (OuterVolumeSpecName: "kube-api-access-vxfkk") pod "6af9722b-be10-4c3e-9c73-96b114678d9d" (UID: "6af9722b-be10-4c3e-9c73-96b114678d9d"). InnerVolumeSpecName "kube-api-access-vxfkk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.701944 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6af9722b-be10-4c3e-9c73-96b114678d9d" (UID: "6af9722b-be10-4c3e-9c73-96b114678d9d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.764325 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6af9722b-be10-4c3e-9c73-96b114678d9d" (UID: "6af9722b-be10-4c3e-9c73-96b114678d9d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.774761 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-config-data" (OuterVolumeSpecName: "config-data") pod "6af9722b-be10-4c3e-9c73-96b114678d9d" (UID: "6af9722b-be10-4c3e-9c73-96b114678d9d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.795951 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.795981 4954 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.795993 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxfkk\" (UniqueName: \"kubernetes.io/projected/6af9722b-be10-4c3e-9c73-96b114678d9d-kube-api-access-vxfkk\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.796002 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6af9722b-be10-4c3e-9c73-96b114678d9d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.807266 4954 generic.go:334] "Generic (PLEG): container finished" podID="6af9722b-be10-4c3e-9c73-96b114678d9d" containerID="c3793f97728a1daa5adad8b0318266a145058dfb78f7af045a770a6b937dc049" exitCode=0
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.808269 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-786547d6db-f76dj"
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.808462 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-786547d6db-f76dj" event={"ID":"6af9722b-be10-4c3e-9c73-96b114678d9d","Type":"ContainerDied","Data":"c3793f97728a1daa5adad8b0318266a145058dfb78f7af045a770a6b937dc049"}
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.808496 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-786547d6db-f76dj" event={"ID":"6af9722b-be10-4c3e-9c73-96b114678d9d","Type":"ContainerDied","Data":"29ee47ca0e68ab917232eb38a608a4f182545ca8915533e473940fb09fb1aea6"}
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.808517 4954 scope.go:117] "RemoveContainer" containerID="c3793f97728a1daa5adad8b0318266a145058dfb78f7af045a770a6b937dc049"
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.870928 4954 scope.go:117] "RemoveContainer" containerID="8aef752383c486bd8b446382fa11b0788bce12382196e6716a9ed891dd59abcb"
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.879682 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-786547d6db-f76dj"]
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.890926 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-786547d6db-f76dj"]
Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.901660 4954 scope.go:117] "RemoveContainer" containerID="c3793f97728a1daa5adad8b0318266a145058dfb78f7af045a770a6b937dc049"
Nov 28 16:33:39 crc kubenswrapper[4954]: E1128 16:33:39.903194 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3793f97728a1daa5adad8b0318266a145058dfb78f7af045a770a6b937dc049\": container with ID starting with c3793f97728a1daa5adad8b0318266a145058dfb78f7af045a770a6b937dc049 not found: ID does not exist" containerID="c3793f97728a1daa5adad8b0318266a145058dfb78f7af045a770a6b937dc049"
Nov 28
16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.903232 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3793f97728a1daa5adad8b0318266a145058dfb78f7af045a770a6b937dc049"} err="failed to get container status \"c3793f97728a1daa5adad8b0318266a145058dfb78f7af045a770a6b937dc049\": rpc error: code = NotFound desc = could not find container \"c3793f97728a1daa5adad8b0318266a145058dfb78f7af045a770a6b937dc049\": container with ID starting with c3793f97728a1daa5adad8b0318266a145058dfb78f7af045a770a6b937dc049 not found: ID does not exist" Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.903256 4954 scope.go:117] "RemoveContainer" containerID="8aef752383c486bd8b446382fa11b0788bce12382196e6716a9ed891dd59abcb" Nov 28 16:33:39 crc kubenswrapper[4954]: E1128 16:33:39.903673 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8aef752383c486bd8b446382fa11b0788bce12382196e6716a9ed891dd59abcb\": container with ID starting with 8aef752383c486bd8b446382fa11b0788bce12382196e6716a9ed891dd59abcb not found: ID does not exist" containerID="8aef752383c486bd8b446382fa11b0788bce12382196e6716a9ed891dd59abcb" Nov 28 16:33:39 crc kubenswrapper[4954]: I1128 16:33:39.903720 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8aef752383c486bd8b446382fa11b0788bce12382196e6716a9ed891dd59abcb"} err="failed to get container status \"8aef752383c486bd8b446382fa11b0788bce12382196e6716a9ed891dd59abcb\": rpc error: code = NotFound desc = could not find container \"8aef752383c486bd8b446382fa11b0788bce12382196e6716a9ed891dd59abcb\": container with ID starting with 8aef752383c486bd8b446382fa11b0788bce12382196e6716a9ed891dd59abcb not found: ID does not exist" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.089070 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.804032 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 28 16:33:40 crc kubenswrapper[4954]: E1128 16:33:40.804496 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6af9722b-be10-4c3e-9c73-96b114678d9d" containerName="barbican-api-log" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.804520 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6af9722b-be10-4c3e-9c73-96b114678d9d" containerName="barbican-api-log" Nov 28 16:33:40 crc kubenswrapper[4954]: E1128 16:33:40.804578 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6af9722b-be10-4c3e-9c73-96b114678d9d" containerName="barbican-api" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.804589 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6af9722b-be10-4c3e-9c73-96b114678d9d" containerName="barbican-api" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.804813 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6af9722b-be10-4c3e-9c73-96b114678d9d" containerName="barbican-api" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.804834 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6af9722b-be10-4c3e-9c73-96b114678d9d" containerName="barbican-api-log" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.805570 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.808883 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.809377 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.811113 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-xvw67" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.813612 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.819757 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6361e8de-58e2-48d9-a6c7-53466e37e3a5","Type":"ContainerStarted","Data":"8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d"} Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.819933 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.824214 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.824272 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.859792 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.932893818 podStartE2EDuration="7.8597737s" podCreationTimestamp="2025-11-28 16:33:33 +0000 UTC" firstStartedPulling="2025-11-28 16:33:34.698633082 +0000 UTC m=+1368.090301623" lastFinishedPulling="2025-11-28 16:33:39.625512964 +0000 UTC m=+1373.017181505" observedRunningTime="2025-11-28 16:33:40.852910463 +0000 UTC m=+1374.244579004" watchObservedRunningTime="2025-11-28 16:33:40.8597737 +0000 UTC m=+1374.251442231" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.915989 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/082f7647-b97a-4d57-83b1-ae69d61827b1-openstack-config\") pod \"openstackclient\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " pod="openstack/openstackclient" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.916079 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnkwt\" (UniqueName: \"kubernetes.io/projected/082f7647-b97a-4d57-83b1-ae69d61827b1-kube-api-access-pnkwt\") pod \"openstackclient\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " pod="openstack/openstackclient" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.916104 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/082f7647-b97a-4d57-83b1-ae69d61827b1-combined-ca-bundle\") pod \"openstackclient\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " pod="openstack/openstackclient" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.916311 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: 
\"kubernetes.io/secret/082f7647-b97a-4d57-83b1-ae69d61827b1-openstack-config-secret\") pod \"openstackclient\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " pod="openstack/openstackclient" Nov 28 16:33:40 crc kubenswrapper[4954]: I1128 16:33:40.969627 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.022420 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/082f7647-b97a-4d57-83b1-ae69d61827b1-openstack-config\") pod \"openstackclient\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " pod="openstack/openstackclient" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.022559 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnkwt\" (UniqueName: \"kubernetes.io/projected/082f7647-b97a-4d57-83b1-ae69d61827b1-kube-api-access-pnkwt\") pod \"openstackclient\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " pod="openstack/openstackclient" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.022591 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/082f7647-b97a-4d57-83b1-ae69d61827b1-combined-ca-bundle\") pod \"openstackclient\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " pod="openstack/openstackclient" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.022782 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/082f7647-b97a-4d57-83b1-ae69d61827b1-openstack-config-secret\") pod \"openstackclient\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " pod="openstack/openstackclient" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.026034 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/082f7647-b97a-4d57-83b1-ae69d61827b1-openstack-config\") pod \"openstackclient\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " pod="openstack/openstackclient" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.028989 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/082f7647-b97a-4d57-83b1-ae69d61827b1-openstack-config-secret\") pod \"openstackclient\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " pod="openstack/openstackclient" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.030820 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-vbxm2"] Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.031914 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" podUID="9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4" containerName="dnsmasq-dns" containerID="cri-o://4c9e91866b6c7b84ea8f51d5283421aad41d1a73ed9c11f6c611001ab81c73af" gracePeriod=10 Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.033292 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/082f7647-b97a-4d57-83b1-ae69d61827b1-combined-ca-bundle\") pod \"openstackclient\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " pod="openstack/openstackclient" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.067821 4954 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnkwt\" (UniqueName: \"kubernetes.io/projected/082f7647-b97a-4d57-83b1-ae69d61827b1-kube-api-access-pnkwt\") pod \"openstackclient\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " pod="openstack/openstackclient" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.129064 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.267200 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.342552 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.642928 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.736758 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.742162 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-config\") pod \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.742398 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-ovsdbserver-nb\") pod \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.742460 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4nbpv\" (UniqueName: \"kubernetes.io/projected/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-kube-api-access-4nbpv\") pod \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.742764 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-dns-svc\") pod \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.742793 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-dns-swift-storage-0\") pod \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.742839 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-ovsdbserver-sb\") pod \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\" (UID: \"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4\") " Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.755188 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-kube-api-access-4nbpv" (OuterVolumeSpecName: "kube-api-access-4nbpv") pod 
"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4" (UID: "9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4"). InnerVolumeSpecName "kube-api-access-4nbpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.795399 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4" (UID: "9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.798374 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4" (UID: "9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.823982 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4" (UID: "9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.843747 4954 generic.go:334] "Generic (PLEG): container finished" podID="9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4" containerID="4c9e91866b6c7b84ea8f51d5283421aad41d1a73ed9c11f6c611001ab81c73af" exitCode=0 Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.843806 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" event={"ID":"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4","Type":"ContainerDied","Data":"4c9e91866b6c7b84ea8f51d5283421aad41d1a73ed9c11f6c611001ab81c73af"} Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.843838 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" event={"ID":"9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4","Type":"ContainerDied","Data":"8cfaab159a84085345682ec6a24b2ad75a95873161b0929d46e08ef6a9819872"} Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.843870 4954 scope.go:117] "RemoveContainer" containerID="4c9e91866b6c7b84ea8f51d5283421aad41d1a73ed9c11f6c611001ab81c73af" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.844005 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-vbxm2" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.845052 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.845094 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4nbpv\" (UniqueName: \"kubernetes.io/projected/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-kube-api-access-4nbpv\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.845107 4954 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.845120 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.852124 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4" (UID: "9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.852201 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"082f7647-b97a-4d57-83b1-ae69d61827b1","Type":"ContainerStarted","Data":"ce442eda8a94a05f1c1369d770a1daf59a0df33e7dc035a96fb07e09e3fc3bbd"} Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.852483 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="65dba6ce-1c65-433c-90bd-c721bf7171f0" containerName="cinder-scheduler" containerID="cri-o://fcfb95b7f3e18870b78060330e2a591dd77bda72b104e157af8204dcc5f66950" gracePeriod=30 Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.853036 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="65dba6ce-1c65-433c-90bd-c721bf7171f0" containerName="probe" containerID="cri-o://fbdd5cf5a10a1349ac9a1ff09bcdfc6d3741cad9ad9ba329c75c15745d5961f8" gracePeriod=30 Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.865188 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-config" (OuterVolumeSpecName: "config") pod "9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4" (UID: "9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.898739 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6af9722b-be10-4c3e-9c73-96b114678d9d" path="/var/lib/kubelet/pods/6af9722b-be10-4c3e-9c73-96b114678d9d/volumes" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.928625 4954 scope.go:117] "RemoveContainer" containerID="c8b33144be84f04bad707922112f996de86772fe1a09163aa73a4e2f40466ac8" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.947343 4954 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.947390 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.974320 4954 scope.go:117] "RemoveContainer" containerID="4c9e91866b6c7b84ea8f51d5283421aad41d1a73ed9c11f6c611001ab81c73af" Nov 28 16:33:41 crc kubenswrapper[4954]: E1128 16:33:41.977766 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c9e91866b6c7b84ea8f51d5283421aad41d1a73ed9c11f6c611001ab81c73af\": container with ID starting with 4c9e91866b6c7b84ea8f51d5283421aad41d1a73ed9c11f6c611001ab81c73af not found: ID does not exist" containerID="4c9e91866b6c7b84ea8f51d5283421aad41d1a73ed9c11f6c611001ab81c73af" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.977828 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c9e91866b6c7b84ea8f51d5283421aad41d1a73ed9c11f6c611001ab81c73af"} err="failed to get container status \"4c9e91866b6c7b84ea8f51d5283421aad41d1a73ed9c11f6c611001ab81c73af\": rpc error: code = NotFound desc = could not find container \"4c9e91866b6c7b84ea8f51d5283421aad41d1a73ed9c11f6c611001ab81c73af\": container with ID starting with 4c9e91866b6c7b84ea8f51d5283421aad41d1a73ed9c11f6c611001ab81c73af not found: ID does not exist" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.977859 4954 scope.go:117] "RemoveContainer" containerID="c8b33144be84f04bad707922112f996de86772fe1a09163aa73a4e2f40466ac8" Nov 28 16:33:41 crc kubenswrapper[4954]: E1128 16:33:41.978239 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8b33144be84f04bad707922112f996de86772fe1a09163aa73a4e2f40466ac8\": container with ID starting with c8b33144be84f04bad707922112f996de86772fe1a09163aa73a4e2f40466ac8 not found: ID does not exist" containerID="c8b33144be84f04bad707922112f996de86772fe1a09163aa73a4e2f40466ac8" Nov 28 16:33:41 crc kubenswrapper[4954]: I1128 16:33:41.978267 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8b33144be84f04bad707922112f996de86772fe1a09163aa73a4e2f40466ac8"} err="failed to get container status \"c8b33144be84f04bad707922112f996de86772fe1a09163aa73a4e2f40466ac8\": rpc error: code = NotFound desc = could not find container \"c8b33144be84f04bad707922112f996de86772fe1a09163aa73a4e2f40466ac8\": container with ID starting with c8b33144be84f04bad707922112f996de86772fe1a09163aa73a4e2f40466ac8 not found: ID does not exist" Nov 28 16:33:42 crc kubenswrapper[4954]: I1128 16:33:42.168305 
4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-vbxm2"] Nov 28 16:33:42 crc kubenswrapper[4954]: I1128 16:33:42.181851 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-vbxm2"] Nov 28 16:33:42 crc kubenswrapper[4954]: I1128 16:33:42.868090 4954 generic.go:334] "Generic (PLEG): container finished" podID="65dba6ce-1c65-433c-90bd-c721bf7171f0" containerID="fbdd5cf5a10a1349ac9a1ff09bcdfc6d3741cad9ad9ba329c75c15745d5961f8" exitCode=0 Nov 28 16:33:42 crc kubenswrapper[4954]: I1128 16:33:42.868376 4954 generic.go:334] "Generic (PLEG): container finished" podID="65dba6ce-1c65-433c-90bd-c721bf7171f0" containerID="fcfb95b7f3e18870b78060330e2a591dd77bda72b104e157af8204dcc5f66950" exitCode=0 Nov 28 16:33:42 crc kubenswrapper[4954]: I1128 16:33:42.868202 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"65dba6ce-1c65-433c-90bd-c721bf7171f0","Type":"ContainerDied","Data":"fbdd5cf5a10a1349ac9a1ff09bcdfc6d3741cad9ad9ba329c75c15745d5961f8"} Nov 28 16:33:42 crc kubenswrapper[4954]: I1128 16:33:42.868414 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"65dba6ce-1c65-433c-90bd-c721bf7171f0","Type":"ContainerDied","Data":"fcfb95b7f3e18870b78060330e2a591dd77bda72b104e157af8204dcc5f66950"} Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.167024 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.268786 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-config-data\") pod \"65dba6ce-1c65-433c-90bd-c721bf7171f0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.268911 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nz5ht\" (UniqueName: \"kubernetes.io/projected/65dba6ce-1c65-433c-90bd-c721bf7171f0-kube-api-access-nz5ht\") pod \"65dba6ce-1c65-433c-90bd-c721bf7171f0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.268967 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65dba6ce-1c65-433c-90bd-c721bf7171f0-etc-machine-id\") pod \"65dba6ce-1c65-433c-90bd-c721bf7171f0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.269026 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-combined-ca-bundle\") pod \"65dba6ce-1c65-433c-90bd-c721bf7171f0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.269071 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-scripts\") pod \"65dba6ce-1c65-433c-90bd-c721bf7171f0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.269122 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-config-data-custom\") pod \"65dba6ce-1c65-433c-90bd-c721bf7171f0\" (UID: \"65dba6ce-1c65-433c-90bd-c721bf7171f0\") " Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.273643 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/65dba6ce-1c65-433c-90bd-c721bf7171f0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "65dba6ce-1c65-433c-90bd-c721bf7171f0" (UID: "65dba6ce-1c65-433c-90bd-c721bf7171f0"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.278686 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65dba6ce-1c65-433c-90bd-c721bf7171f0-kube-api-access-nz5ht" (OuterVolumeSpecName: "kube-api-access-nz5ht") pod "65dba6ce-1c65-433c-90bd-c721bf7171f0" (UID: "65dba6ce-1c65-433c-90bd-c721bf7171f0"). InnerVolumeSpecName "kube-api-access-nz5ht". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.279390 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "65dba6ce-1c65-433c-90bd-c721bf7171f0" (UID: "65dba6ce-1c65-433c-90bd-c721bf7171f0"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.283250 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-scripts" (OuterVolumeSpecName: "scripts") pod "65dba6ce-1c65-433c-90bd-c721bf7171f0" (UID: "65dba6ce-1c65-433c-90bd-c721bf7171f0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.351950 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "65dba6ce-1c65-433c-90bd-c721bf7171f0" (UID: "65dba6ce-1c65-433c-90bd-c721bf7171f0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.374586 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.374628 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.374639 4954 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.374651 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nz5ht\" (UniqueName: \"kubernetes.io/projected/65dba6ce-1c65-433c-90bd-c721bf7171f0-kube-api-access-nz5ht\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.374662 4954 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65dba6ce-1c65-433c-90bd-c721bf7171f0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.397323 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-config-data" (OuterVolumeSpecName: "config-data") pod "65dba6ce-1c65-433c-90bd-c721bf7171f0" (UID: "65dba6ce-1c65-433c-90bd-c721bf7171f0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.476249 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65dba6ce-1c65-433c-90bd-c721bf7171f0-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.868518 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4" path="/var/lib/kubelet/pods/9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4/volumes" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.903972 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"65dba6ce-1c65-433c-90bd-c721bf7171f0","Type":"ContainerDied","Data":"688ebab56c8e2f1077e6b3f13aa40be70b666df1794e9db7b60bb2cd20c43b7f"} Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.904036 4954 scope.go:117] "RemoveContainer" containerID="fbdd5cf5a10a1349ac9a1ff09bcdfc6d3741cad9ad9ba329c75c15745d5961f8" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.904159 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.940636 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.962786 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.965954 4954 scope.go:117] "RemoveContainer" containerID="fcfb95b7f3e18870b78060330e2a591dd77bda72b104e157af8204dcc5f66950" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.979923 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:33:43 crc kubenswrapper[4954]: E1128 16:33:43.980312 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4" containerName="init" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.980335 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4" containerName="init" Nov 28 16:33:43 crc kubenswrapper[4954]: E1128 16:33:43.980350 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4" containerName="dnsmasq-dns" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.980357 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4" containerName="dnsmasq-dns" Nov 28 16:33:43 crc kubenswrapper[4954]: E1128 16:33:43.980384 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65dba6ce-1c65-433c-90bd-c721bf7171f0" containerName="cinder-scheduler" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.980390 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="65dba6ce-1c65-433c-90bd-c721bf7171f0" containerName="cinder-scheduler" Nov 28 16:33:43 crc kubenswrapper[4954]: E1128 16:33:43.980406 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65dba6ce-1c65-433c-90bd-c721bf7171f0" containerName="probe" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.980411 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="65dba6ce-1c65-433c-90bd-c721bf7171f0" containerName="probe" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.980600 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="65dba6ce-1c65-433c-90bd-c721bf7171f0" containerName="cinder-scheduler" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.980625 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="65dba6ce-1c65-433c-90bd-c721bf7171f0" containerName="probe" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.980642 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ea4a7d7-8fb8-4a12-936a-30fbcc5810c4" containerName="dnsmasq-dns" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.981580 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.986861 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 28 16:33:43 crc kubenswrapper[4954]: I1128 16:33:43.993298 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.091113 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-config-data\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.091196 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.091224 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c4439c3d-c90f-4b13-87a4-01c211cec875-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.091275 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.091300 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4v8bx\" (UniqueName: \"kubernetes.io/projected/c4439c3d-c90f-4b13-87a4-01c211cec875-kube-api-access-4v8bx\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.091380 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-scripts\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.193517 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-config-data\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.193591 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.193619 4954 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c4439c3d-c90f-4b13-87a4-01c211cec875-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.193653 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.193675 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4v8bx\" (UniqueName: \"kubernetes.io/projected/c4439c3d-c90f-4b13-87a4-01c211cec875-kube-api-access-4v8bx\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.193720 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-scripts\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.193722 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c4439c3d-c90f-4b13-87a4-01c211cec875-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.201064 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.204105 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-scripts\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.204368 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.217726 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-config-data\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.222144 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4v8bx\" (UniqueName: \"kubernetes.io/projected/c4439c3d-c90f-4b13-87a4-01c211cec875-kube-api-access-4v8bx\") pod \"cinder-scheduler-0\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " pod="openstack/cinder-scheduler-0" Nov 28 
16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.307484 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.513465 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-5fb676564c-cfds2"] Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.522102 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.529392 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.529606 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.529747 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.546954 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5fb676564c-cfds2"] Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.615583 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-config-data\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.615644 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-combined-ca-bundle\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.615664 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-internal-tls-certs\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.615687 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-etc-swift\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.615788 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-run-httpd\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.615804 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwp25\" (UniqueName: \"kubernetes.io/projected/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-kube-api-access-xwp25\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: 
\"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.615819 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-public-tls-certs\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.615837 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-log-httpd\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.718565 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-run-httpd\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.718649 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwp25\" (UniqueName: \"kubernetes.io/projected/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-kube-api-access-xwp25\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.718737 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-public-tls-certs\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.718761 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-log-httpd\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.718834 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-config-data\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.718893 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-combined-ca-bundle\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.718909 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-internal-tls-certs\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: 
\"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.718954 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-etc-swift\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.719227 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-run-httpd\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.719472 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-log-httpd\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.727133 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-etc-swift\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.731128 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-public-tls-certs\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.731908 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-internal-tls-certs\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.735421 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-config-data\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.743426 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-combined-ca-bundle\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.744381 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwp25\" (UniqueName: \"kubernetes.io/projected/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-kube-api-access-xwp25\") pod \"swift-proxy-5fb676564c-cfds2\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 
16:33:44.834443 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.876949 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:44 crc kubenswrapper[4954]: I1128 16:33:44.948487 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c4439c3d-c90f-4b13-87a4-01c211cec875","Type":"ContainerStarted","Data":"822046e6502bba3f5d237bf8d1ee0f710eb4ce9b4d9eb0e34a6234ad3191b3a9"} Nov 28 16:33:45 crc kubenswrapper[4954]: I1128 16:33:45.473705 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5fb676564c-cfds2"] Nov 28 16:33:45 crc kubenswrapper[4954]: I1128 16:33:45.603812 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:45 crc kubenswrapper[4954]: I1128 16:33:45.604088 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerName="ceilometer-central-agent" containerID="cri-o://6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186" gracePeriod=30 Nov 28 16:33:45 crc kubenswrapper[4954]: I1128 16:33:45.604419 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerName="proxy-httpd" containerID="cri-o://8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d" gracePeriod=30 Nov 28 16:33:45 crc kubenswrapper[4954]: I1128 16:33:45.604508 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerName="ceilometer-notification-agent" containerID="cri-o://e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c" gracePeriod=30 Nov 28 16:33:45 crc kubenswrapper[4954]: I1128 16:33:45.604511 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerName="sg-core" containerID="cri-o://7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb" gracePeriod=30 Nov 28 16:33:45 crc kubenswrapper[4954]: I1128 16:33:45.881866 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65dba6ce-1c65-433c-90bd-c721bf7171f0" path="/var/lib/kubelet/pods/65dba6ce-1c65-433c-90bd-c721bf7171f0/volumes" Nov 28 16:33:45 crc kubenswrapper[4954]: I1128 16:33:45.972895 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c4439c3d-c90f-4b13-87a4-01c211cec875","Type":"ContainerStarted","Data":"e0e955ad82d7153e1ce49532c6e27790509f8ccd441ac5a41fe5281a1f7f3c3a"} Nov 28 16:33:45 crc kubenswrapper[4954]: I1128 16:33:45.976237 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5fb676564c-cfds2" event={"ID":"3022fb04-fc0f-44b1-9f97-3893aa4bdd68","Type":"ContainerStarted","Data":"47dde57efce82402d7d8343557f1b456ad132ea3ccd746b7fa93ff0c28c1f4c6"} Nov 28 16:33:45 crc kubenswrapper[4954]: I1128 16:33:45.980588 4954 generic.go:334] "Generic (PLEG): container finished" podID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerID="8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d" exitCode=0 Nov 28 16:33:45 crc kubenswrapper[4954]: I1128 16:33:45.980624 4954 generic.go:334] "Generic (PLEG): container finished" 
podID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerID="7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb" exitCode=2 Nov 28 16:33:45 crc kubenswrapper[4954]: I1128 16:33:45.980644 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6361e8de-58e2-48d9-a6c7-53466e37e3a5","Type":"ContainerDied","Data":"8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d"} Nov 28 16:33:45 crc kubenswrapper[4954]: I1128 16:33:45.980673 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6361e8de-58e2-48d9-a6c7-53466e37e3a5","Type":"ContainerDied","Data":"7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb"} Nov 28 16:33:46 crc kubenswrapper[4954]: I1128 16:33:46.986850 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:33:46 crc kubenswrapper[4954]: I1128 16:33:46.998629 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5fb676564c-cfds2" event={"ID":"3022fb04-fc0f-44b1-9f97-3893aa4bdd68","Type":"ContainerStarted","Data":"6537c4c8cd8dd3fab3e5160d3fe8c38e05653b1a2fb44a373fda2650bb61ce42"} Nov 28 16:33:46 crc kubenswrapper[4954]: I1128 16:33:46.998681 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5fb676564c-cfds2" event={"ID":"3022fb04-fc0f-44b1-9f97-3893aa4bdd68","Type":"ContainerStarted","Data":"97b634ca797866be1255f1100d6abda787c78438108b953d4c36c125fda1b7b9"} Nov 28 16:33:46 crc kubenswrapper[4954]: I1128 16:33:46.999759 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:46 crc kubenswrapper[4954]: I1128 16:33:46.999792 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.009325 4954 generic.go:334] "Generic (PLEG): container finished" podID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerID="e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c" exitCode=0 Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.009363 4954 generic.go:334] "Generic (PLEG): container finished" podID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerID="6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186" exitCode=0 Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.009411 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6361e8de-58e2-48d9-a6c7-53466e37e3a5","Type":"ContainerDied","Data":"e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c"} Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.009444 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6361e8de-58e2-48d9-a6c7-53466e37e3a5","Type":"ContainerDied","Data":"6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186"} Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.009457 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6361e8de-58e2-48d9-a6c7-53466e37e3a5","Type":"ContainerDied","Data":"e83903f487a2cb8f86d554d66b1c110d7832e2f12dad148451c96d5d8c25b44c"} Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.009475 4954 scope.go:117] "RemoveContainer" containerID="8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.009640 4954 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.030646 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c4439c3d-c90f-4b13-87a4-01c211cec875","Type":"ContainerStarted","Data":"01194eaa4540cc51e694ffb69c1bc9bb2fcad75b283509d2f2d288ca98fa6388"} Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.034324 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.065324 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-5fb676564c-cfds2" podStartSLOduration=3.06528313 podStartE2EDuration="3.06528313s" podCreationTimestamp="2025-11-28 16:33:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:47.039836246 +0000 UTC m=+1380.431504787" watchObservedRunningTime="2025-11-28 16:33:47.06528313 +0000 UTC m=+1380.456951701" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.073153 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-sg-core-conf-yaml\") pod \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.073304 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6361e8de-58e2-48d9-a6c7-53466e37e3a5-log-httpd\") pod \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.073334 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-combined-ca-bundle\") pod \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.073364 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6361e8de-58e2-48d9-a6c7-53466e37e3a5-run-httpd\") pod \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.073407 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-config-data\") pod \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.073441 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-scripts\") pod \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\" (UID: \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.073501 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2snd\" (UniqueName: \"kubernetes.io/projected/6361e8de-58e2-48d9-a6c7-53466e37e3a5-kube-api-access-r2snd\") pod \"6361e8de-58e2-48d9-a6c7-53466e37e3a5\" (UID: 
\"6361e8de-58e2-48d9-a6c7-53466e37e3a5\") " Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.074281 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6361e8de-58e2-48d9-a6c7-53466e37e3a5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6361e8de-58e2-48d9-a6c7-53466e37e3a5" (UID: "6361e8de-58e2-48d9-a6c7-53466e37e3a5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.074925 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6361e8de-58e2-48d9-a6c7-53466e37e3a5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6361e8de-58e2-48d9-a6c7-53466e37e3a5" (UID: "6361e8de-58e2-48d9-a6c7-53466e37e3a5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.084710 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6361e8de-58e2-48d9-a6c7-53466e37e3a5-kube-api-access-r2snd" (OuterVolumeSpecName: "kube-api-access-r2snd") pod "6361e8de-58e2-48d9-a6c7-53466e37e3a5" (UID: "6361e8de-58e2-48d9-a6c7-53466e37e3a5"). InnerVolumeSpecName "kube-api-access-r2snd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.085538 4954 scope.go:117] "RemoveContainer" containerID="7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.088137 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.088115172 podStartE2EDuration="4.088115172s" podCreationTimestamp="2025-11-28 16:33:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:33:47.068626086 +0000 UTC m=+1380.460294647" watchObservedRunningTime="2025-11-28 16:33:47.088115172 +0000 UTC m=+1380.479783723" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.112109 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-scripts" (OuterVolumeSpecName: "scripts") pod "6361e8de-58e2-48d9-a6c7-53466e37e3a5" (UID: "6361e8de-58e2-48d9-a6c7-53466e37e3a5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.154765 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6361e8de-58e2-48d9-a6c7-53466e37e3a5" (UID: "6361e8de-58e2-48d9-a6c7-53466e37e3a5"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.176924 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.176959 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2snd\" (UniqueName: \"kubernetes.io/projected/6361e8de-58e2-48d9-a6c7-53466e37e3a5-kube-api-access-r2snd\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.176971 4954 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.176981 4954 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6361e8de-58e2-48d9-a6c7-53466e37e3a5-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.176994 4954 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6361e8de-58e2-48d9-a6c7-53466e37e3a5-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.267144 4954 scope.go:117] "RemoveContainer" containerID="e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.295224 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6361e8de-58e2-48d9-a6c7-53466e37e3a5" (UID: "6361e8de-58e2-48d9-a6c7-53466e37e3a5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.320572 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-config-data" (OuterVolumeSpecName: "config-data") pod "6361e8de-58e2-48d9-a6c7-53466e37e3a5" (UID: "6361e8de-58e2-48d9-a6c7-53466e37e3a5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.345338 4954 scope.go:117] "RemoveContainer" containerID="6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.375719 4954 scope.go:117] "RemoveContainer" containerID="8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d" Nov 28 16:33:47 crc kubenswrapper[4954]: E1128 16:33:47.376145 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d\": container with ID starting with 8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d not found: ID does not exist" containerID="8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.376174 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d"} err="failed to get container status \"8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d\": rpc error: code = NotFound desc = could not find container \"8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d\": container with ID starting with 8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d not found: ID does not exist" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.376314 4954 scope.go:117] "RemoveContainer" containerID="7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb" Nov 28 16:33:47 crc kubenswrapper[4954]: E1128 16:33:47.376807 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb\": container with ID starting with 7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb not found: ID does not exist" containerID="7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.376827 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb"} err="failed to get container status \"7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb\": rpc error: code = NotFound desc = could not find container \"7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb\": container with ID starting with 7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb not found: ID does not exist" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.376839 4954 scope.go:117] "RemoveContainer" containerID="e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c" Nov 28 16:33:47 crc kubenswrapper[4954]: E1128 16:33:47.377623 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c\": container with ID starting with e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c not found: ID does not exist" containerID="e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.377643 4954 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c"} err="failed to get container status \"e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c\": rpc error: code = NotFound desc = could not find container \"e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c\": container with ID starting with e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c not found: ID does not exist" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.377657 4954 scope.go:117] "RemoveContainer" containerID="6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.380685 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.380729 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6361e8de-58e2-48d9-a6c7-53466e37e3a5-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:47 crc kubenswrapper[4954]: E1128 16:33:47.382944 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186\": container with ID starting with 6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186 not found: ID does not exist" containerID="6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.382966 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186"} err="failed to get container status \"6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186\": rpc error: code = NotFound desc = could not find container \"6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186\": container with ID starting with 6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186 not found: ID does not exist" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.382985 4954 scope.go:117] "RemoveContainer" containerID="8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.388185 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d"} err="failed to get container status \"8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d\": rpc error: code = NotFound desc = could not find container \"8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d\": container with ID starting with 8ae2d87b44c535625c9d50f7d5f80768d7d1e3524fa9b93bce165f65d513469d not found: ID does not exist" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.388262 4954 scope.go:117] "RemoveContainer" containerID="7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.389972 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb"} err="failed to get container status \"7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb\": rpc error: code = NotFound desc = 
could not find container \"7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb\": container with ID starting with 7b422d81331b4dc15e2728f754c279f290677343ddb9c746483801042622c6bb not found: ID does not exist" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.390020 4954 scope.go:117] "RemoveContainer" containerID="e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.391893 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c"} err="failed to get container status \"e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c\": rpc error: code = NotFound desc = could not find container \"e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c\": container with ID starting with e1d2bcfe540771d49850f618e23678d5cb0d9fc66de618ee192233e2c958611c not found: ID does not exist" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.391913 4954 scope.go:117] "RemoveContainer" containerID="6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.393917 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186"} err="failed to get container status \"6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186\": rpc error: code = NotFound desc = could not find container \"6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186\": container with ID starting with 6293e8083f3d31d8989388a67e1e45f1b8298bf4cc96542c0d1bca7871735186 not found: ID does not exist" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.399669 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.714129 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.722745 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.741382 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:47 crc kubenswrapper[4954]: E1128 16:33:47.741872 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerName="ceilometer-notification-agent" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.741894 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerName="ceilometer-notification-agent" Nov 28 16:33:47 crc kubenswrapper[4954]: E1128 16:33:47.741921 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerName="sg-core" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.741929 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerName="sg-core" Nov 28 16:33:47 crc kubenswrapper[4954]: E1128 16:33:47.741944 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerName="ceilometer-central-agent" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.741952 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" 
containerName="ceilometer-central-agent" Nov 28 16:33:47 crc kubenswrapper[4954]: E1128 16:33:47.741990 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerName="proxy-httpd" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.741998 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerName="proxy-httpd" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.742205 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerName="sg-core" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.742230 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerName="proxy-httpd" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.742245 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerName="ceilometer-notification-agent" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.742261 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" containerName="ceilometer-central-agent" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.751994 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.757231 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.757438 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.772923 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.902406 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6354503c-292f-4372-866e-6a627c56c216-run-httpd\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.902452 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6354503c-292f-4372-866e-6a627c56c216-log-httpd\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.902521 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-config-data\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.902557 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.902581 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-pzv2r\" (UniqueName: \"kubernetes.io/projected/6354503c-292f-4372-866e-6a627c56c216-kube-api-access-pzv2r\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.902597 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.902732 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-scripts\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:47 crc kubenswrapper[4954]: I1128 16:33:47.928709 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6361e8de-58e2-48d9-a6c7-53466e37e3a5" path="/var/lib/kubelet/pods/6361e8de-58e2-48d9-a6c7-53466e37e3a5/volumes" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.004926 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6354503c-292f-4372-866e-6a627c56c216-run-httpd\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.005379 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6354503c-292f-4372-866e-6a627c56c216-run-httpd\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.005442 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6354503c-292f-4372-866e-6a627c56c216-log-httpd\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.005726 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6354503c-292f-4372-866e-6a627c56c216-log-httpd\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.006429 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-config-data\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.006465 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.007009 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzv2r\" (UniqueName: 
\"kubernetes.io/projected/6354503c-292f-4372-866e-6a627c56c216-kube-api-access-pzv2r\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.007031 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.007889 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-scripts\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.011396 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.011970 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-scripts\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.012064 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-config-data\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.014353 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.026284 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzv2r\" (UniqueName: \"kubernetes.io/projected/6354503c-292f-4372-866e-6a627c56c216-kube-api-access-pzv2r\") pod \"ceilometer-0\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " pod="openstack/ceilometer-0" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.216928 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:33:48 crc kubenswrapper[4954]: I1128 16:33:48.821791 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:48 crc kubenswrapper[4954]: W1128 16:33:48.844915 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6354503c_292f_4372_866e_6a627c56c216.slice/crio-bdf81a473c1f178c24e735cdd087c3d73185fe808434af43ca21e876314ee182 WatchSource:0}: Error finding container bdf81a473c1f178c24e735cdd087c3d73185fe808434af43ca21e876314ee182: Status 404 returned error can't find the container with id bdf81a473c1f178c24e735cdd087c3d73185fe808434af43ca21e876314ee182 Nov 28 16:33:49 crc kubenswrapper[4954]: I1128 16:33:49.068496 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6354503c-292f-4372-866e-6a627c56c216","Type":"ContainerStarted","Data":"bdf81a473c1f178c24e735cdd087c3d73185fe808434af43ca21e876314ee182"} Nov 28 16:33:49 crc kubenswrapper[4954]: I1128 16:33:49.308413 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 28 16:33:49 crc kubenswrapper[4954]: I1128 16:33:49.619139 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:33:49 crc kubenswrapper[4954]: I1128 16:33:49.693980 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-67c7f88868-224xn"] Nov 28 16:33:49 crc kubenswrapper[4954]: I1128 16:33:49.694489 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-67c7f88868-224xn" podUID="7ee4f765-e243-4661-b118-dbf3414cfb2e" containerName="neutron-api" containerID="cri-o://a0dd28b9942fb8fdc120a20870acce41f7cec85c79b45f3505224a17e9537cbd" gracePeriod=30 Nov 28 16:33:49 crc kubenswrapper[4954]: I1128 16:33:49.695018 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-67c7f88868-224xn" podUID="7ee4f765-e243-4661-b118-dbf3414cfb2e" containerName="neutron-httpd" containerID="cri-o://03986f5ef00d106e9e4bf7e552bd6d38547e854ac711405aa68cf159ed4e9276" gracePeriod=30 Nov 28 16:33:50 crc kubenswrapper[4954]: E1128 16:33:50.048966 4954 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ee4f765_e243_4661_b118_dbf3414cfb2e.slice/crio-conmon-03986f5ef00d106e9e4bf7e552bd6d38547e854ac711405aa68cf159ed4e9276.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ee4f765_e243_4661_b118_dbf3414cfb2e.slice/crio-03986f5ef00d106e9e4bf7e552bd6d38547e854ac711405aa68cf159ed4e9276.scope\": RecentStats: unable to find data in memory cache]" Nov 28 16:33:50 crc kubenswrapper[4954]: I1128 16:33:50.085224 4954 generic.go:334] "Generic (PLEG): container finished" podID="7ee4f765-e243-4661-b118-dbf3414cfb2e" containerID="03986f5ef00d106e9e4bf7e552bd6d38547e854ac711405aa68cf159ed4e9276" exitCode=0 Nov 28 16:33:50 crc kubenswrapper[4954]: I1128 16:33:50.085273 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67c7f88868-224xn" event={"ID":"7ee4f765-e243-4661-b118-dbf3414cfb2e","Type":"ContainerDied","Data":"03986f5ef00d106e9e4bf7e552bd6d38547e854ac711405aa68cf159ed4e9276"} Nov 28 16:33:51 crc kubenswrapper[4954]: I1128 16:33:51.794015 
4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:33:52 crc kubenswrapper[4954]: I1128 16:33:52.774059 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-qtp2z"] Nov 28 16:33:52 crc kubenswrapper[4954]: I1128 16:33:52.776144 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-qtp2z" Nov 28 16:33:52 crc kubenswrapper[4954]: I1128 16:33:52.791774 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-qtp2z"] Nov 28 16:33:52 crc kubenswrapper[4954]: I1128 16:33:52.876180 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-lpwkg"] Nov 28 16:33:52 crc kubenswrapper[4954]: I1128 16:33:52.877410 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-lpwkg" Nov 28 16:33:52 crc kubenswrapper[4954]: I1128 16:33:52.886448 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-f92b-account-create-update-j96hc"] Nov 28 16:33:52 crc kubenswrapper[4954]: I1128 16:33:52.887696 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f92b-account-create-update-j96hc" Nov 28 16:33:52 crc kubenswrapper[4954]: I1128 16:33:52.890887 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 28 16:33:52 crc kubenswrapper[4954]: I1128 16:33:52.893986 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-lpwkg"] Nov 28 16:33:52 crc kubenswrapper[4954]: I1128 16:33:52.909784 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dac05eb0-cb63-47c0-8542-0e33bc1d8963-operator-scripts\") pod \"nova-api-db-create-qtp2z\" (UID: \"dac05eb0-cb63-47c0-8542-0e33bc1d8963\") " pod="openstack/nova-api-db-create-qtp2z" Nov 28 16:33:52 crc kubenswrapper[4954]: I1128 16:33:52.909834 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5bqk\" (UniqueName: \"kubernetes.io/projected/dac05eb0-cb63-47c0-8542-0e33bc1d8963-kube-api-access-z5bqk\") pod \"nova-api-db-create-qtp2z\" (UID: \"dac05eb0-cb63-47c0-8542-0e33bc1d8963\") " pod="openstack/nova-api-db-create-qtp2z" Nov 28 16:33:52 crc kubenswrapper[4954]: I1128 16:33:52.917942 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f92b-account-create-update-j96hc"] Nov 28 16:33:52 crc kubenswrapper[4954]: I1128 16:33:52.983242 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-dqk5s"] Nov 28 16:33:52 crc kubenswrapper[4954]: I1128 16:33:52.984354 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-dqk5s" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.022431 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-dqk5s"] Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.023771 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ww5tf\" (UniqueName: \"kubernetes.io/projected/d62a7c52-13eb-499e-acb2-8899ac6984f8-kube-api-access-ww5tf\") pod \"nova-api-f92b-account-create-update-j96hc\" (UID: \"d62a7c52-13eb-499e-acb2-8899ac6984f8\") " pod="openstack/nova-api-f92b-account-create-update-j96hc" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.023828 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dac05eb0-cb63-47c0-8542-0e33bc1d8963-operator-scripts\") pod \"nova-api-db-create-qtp2z\" (UID: \"dac05eb0-cb63-47c0-8542-0e33bc1d8963\") " pod="openstack/nova-api-db-create-qtp2z" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.023863 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5bqk\" (UniqueName: \"kubernetes.io/projected/dac05eb0-cb63-47c0-8542-0e33bc1d8963-kube-api-access-z5bqk\") pod \"nova-api-db-create-qtp2z\" (UID: \"dac05eb0-cb63-47c0-8542-0e33bc1d8963\") " pod="openstack/nova-api-db-create-qtp2z" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.023895 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee22e0b0-27ab-4365-9ccc-6b563eb44b3a-operator-scripts\") pod \"nova-cell0-db-create-lpwkg\" (UID: \"ee22e0b0-27ab-4365-9ccc-6b563eb44b3a\") " pod="openstack/nova-cell0-db-create-lpwkg" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.023979 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvksg\" (UniqueName: \"kubernetes.io/projected/ee22e0b0-27ab-4365-9ccc-6b563eb44b3a-kube-api-access-dvksg\") pod \"nova-cell0-db-create-lpwkg\" (UID: \"ee22e0b0-27ab-4365-9ccc-6b563eb44b3a\") " pod="openstack/nova-cell0-db-create-lpwkg" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.024029 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d62a7c52-13eb-499e-acb2-8899ac6984f8-operator-scripts\") pod \"nova-api-f92b-account-create-update-j96hc\" (UID: \"d62a7c52-13eb-499e-acb2-8899ac6984f8\") " pod="openstack/nova-api-f92b-account-create-update-j96hc" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.024797 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dac05eb0-cb63-47c0-8542-0e33bc1d8963-operator-scripts\") pod \"nova-api-db-create-qtp2z\" (UID: \"dac05eb0-cb63-47c0-8542-0e33bc1d8963\") " pod="openstack/nova-api-db-create-qtp2z" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.054433 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5bqk\" (UniqueName: \"kubernetes.io/projected/dac05eb0-cb63-47c0-8542-0e33bc1d8963-kube-api-access-z5bqk\") pod \"nova-api-db-create-qtp2z\" (UID: \"dac05eb0-cb63-47c0-8542-0e33bc1d8963\") " pod="openstack/nova-api-db-create-qtp2z" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.082272 4954 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-528b-account-create-update-g72ls"] Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.083543 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-528b-account-create-update-g72ls" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.087184 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.105032 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-qtp2z" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.114837 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-528b-account-create-update-g72ls"] Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.126634 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ww5tf\" (UniqueName: \"kubernetes.io/projected/d62a7c52-13eb-499e-acb2-8899ac6984f8-kube-api-access-ww5tf\") pod \"nova-api-f92b-account-create-update-j96hc\" (UID: \"d62a7c52-13eb-499e-acb2-8899ac6984f8\") " pod="openstack/nova-api-f92b-account-create-update-j96hc" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.126687 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee22e0b0-27ab-4365-9ccc-6b563eb44b3a-operator-scripts\") pod \"nova-cell0-db-create-lpwkg\" (UID: \"ee22e0b0-27ab-4365-9ccc-6b563eb44b3a\") " pod="openstack/nova-cell0-db-create-lpwkg" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.126742 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nf925\" (UniqueName: \"kubernetes.io/projected/9c07c750-8357-4ba2-a802-86a6f90ec496-kube-api-access-nf925\") pod \"nova-cell1-db-create-dqk5s\" (UID: \"9c07c750-8357-4ba2-a802-86a6f90ec496\") " pod="openstack/nova-cell1-db-create-dqk5s" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.126778 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c07c750-8357-4ba2-a802-86a6f90ec496-operator-scripts\") pod \"nova-cell1-db-create-dqk5s\" (UID: \"9c07c750-8357-4ba2-a802-86a6f90ec496\") " pod="openstack/nova-cell1-db-create-dqk5s" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.128158 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvksg\" (UniqueName: \"kubernetes.io/projected/ee22e0b0-27ab-4365-9ccc-6b563eb44b3a-kube-api-access-dvksg\") pod \"nova-cell0-db-create-lpwkg\" (UID: \"ee22e0b0-27ab-4365-9ccc-6b563eb44b3a\") " pod="openstack/nova-cell0-db-create-lpwkg" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.128270 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d62a7c52-13eb-499e-acb2-8899ac6984f8-operator-scripts\") pod \"nova-api-f92b-account-create-update-j96hc\" (UID: \"d62a7c52-13eb-499e-acb2-8899ac6984f8\") " pod="openstack/nova-api-f92b-account-create-update-j96hc" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.128493 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee22e0b0-27ab-4365-9ccc-6b563eb44b3a-operator-scripts\") pod 
\"nova-cell0-db-create-lpwkg\" (UID: \"ee22e0b0-27ab-4365-9ccc-6b563eb44b3a\") " pod="openstack/nova-cell0-db-create-lpwkg" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.129212 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d62a7c52-13eb-499e-acb2-8899ac6984f8-operator-scripts\") pod \"nova-api-f92b-account-create-update-j96hc\" (UID: \"d62a7c52-13eb-499e-acb2-8899ac6984f8\") " pod="openstack/nova-api-f92b-account-create-update-j96hc" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.146042 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvksg\" (UniqueName: \"kubernetes.io/projected/ee22e0b0-27ab-4365-9ccc-6b563eb44b3a-kube-api-access-dvksg\") pod \"nova-cell0-db-create-lpwkg\" (UID: \"ee22e0b0-27ab-4365-9ccc-6b563eb44b3a\") " pod="openstack/nova-cell0-db-create-lpwkg" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.157436 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ww5tf\" (UniqueName: \"kubernetes.io/projected/d62a7c52-13eb-499e-acb2-8899ac6984f8-kube-api-access-ww5tf\") pod \"nova-api-f92b-account-create-update-j96hc\" (UID: \"d62a7c52-13eb-499e-acb2-8899ac6984f8\") " pod="openstack/nova-api-f92b-account-create-update-j96hc" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.204808 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-lpwkg" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.220011 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f92b-account-create-update-j96hc" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.230613 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nf925\" (UniqueName: \"kubernetes.io/projected/9c07c750-8357-4ba2-a802-86a6f90ec496-kube-api-access-nf925\") pod \"nova-cell1-db-create-dqk5s\" (UID: \"9c07c750-8357-4ba2-a802-86a6f90ec496\") " pod="openstack/nova-cell1-db-create-dqk5s" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.230666 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c07c750-8357-4ba2-a802-86a6f90ec496-operator-scripts\") pod \"nova-cell1-db-create-dqk5s\" (UID: \"9c07c750-8357-4ba2-a802-86a6f90ec496\") " pod="openstack/nova-cell1-db-create-dqk5s" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.230701 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b278f77-7514-4f2d-8db4-f9f209dc33dd-operator-scripts\") pod \"nova-cell0-528b-account-create-update-g72ls\" (UID: \"7b278f77-7514-4f2d-8db4-f9f209dc33dd\") " pod="openstack/nova-cell0-528b-account-create-update-g72ls" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.230757 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6gfv\" (UniqueName: \"kubernetes.io/projected/7b278f77-7514-4f2d-8db4-f9f209dc33dd-kube-api-access-j6gfv\") pod \"nova-cell0-528b-account-create-update-g72ls\" (UID: \"7b278f77-7514-4f2d-8db4-f9f209dc33dd\") " pod="openstack/nova-cell0-528b-account-create-update-g72ls" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.231690 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c07c750-8357-4ba2-a802-86a6f90ec496-operator-scripts\") pod \"nova-cell1-db-create-dqk5s\" (UID: \"9c07c750-8357-4ba2-a802-86a6f90ec496\") " pod="openstack/nova-cell1-db-create-dqk5s" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.263876 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf925\" (UniqueName: \"kubernetes.io/projected/9c07c750-8357-4ba2-a802-86a6f90ec496-kube-api-access-nf925\") pod \"nova-cell1-db-create-dqk5s\" (UID: \"9c07c750-8357-4ba2-a802-86a6f90ec496\") " pod="openstack/nova-cell1-db-create-dqk5s" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.284712 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-2675-account-create-update-gp79n"] Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.286293 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-2675-account-create-update-gp79n" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.291870 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.299434 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-2675-account-create-update-gp79n"] Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.300356 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dqk5s" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.332318 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6gfv\" (UniqueName: \"kubernetes.io/projected/7b278f77-7514-4f2d-8db4-f9f209dc33dd-kube-api-access-j6gfv\") pod \"nova-cell0-528b-account-create-update-g72ls\" (UID: \"7b278f77-7514-4f2d-8db4-f9f209dc33dd\") " pod="openstack/nova-cell0-528b-account-create-update-g72ls" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.332522 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b278f77-7514-4f2d-8db4-f9f209dc33dd-operator-scripts\") pod \"nova-cell0-528b-account-create-update-g72ls\" (UID: \"7b278f77-7514-4f2d-8db4-f9f209dc33dd\") " pod="openstack/nova-cell0-528b-account-create-update-g72ls" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.333457 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b278f77-7514-4f2d-8db4-f9f209dc33dd-operator-scripts\") pod \"nova-cell0-528b-account-create-update-g72ls\" (UID: \"7b278f77-7514-4f2d-8db4-f9f209dc33dd\") " pod="openstack/nova-cell0-528b-account-create-update-g72ls" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.360010 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6gfv\" (UniqueName: \"kubernetes.io/projected/7b278f77-7514-4f2d-8db4-f9f209dc33dd-kube-api-access-j6gfv\") pod \"nova-cell0-528b-account-create-update-g72ls\" (UID: \"7b278f77-7514-4f2d-8db4-f9f209dc33dd\") " pod="openstack/nova-cell0-528b-account-create-update-g72ls" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.431723 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-528b-account-create-update-g72ls" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.437366 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2ebb6843-0b32-449c-9b8f-d817c3690e67-operator-scripts\") pod \"nova-cell1-2675-account-create-update-gp79n\" (UID: \"2ebb6843-0b32-449c-9b8f-d817c3690e67\") " pod="openstack/nova-cell1-2675-account-create-update-gp79n" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.437418 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2cff\" (UniqueName: \"kubernetes.io/projected/2ebb6843-0b32-449c-9b8f-d817c3690e67-kube-api-access-g2cff\") pod \"nova-cell1-2675-account-create-update-gp79n\" (UID: \"2ebb6843-0b32-449c-9b8f-d817c3690e67\") " pod="openstack/nova-cell1-2675-account-create-update-gp79n" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.540117 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2ebb6843-0b32-449c-9b8f-d817c3690e67-operator-scripts\") pod \"nova-cell1-2675-account-create-update-gp79n\" (UID: \"2ebb6843-0b32-449c-9b8f-d817c3690e67\") " pod="openstack/nova-cell1-2675-account-create-update-gp79n" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.540175 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2cff\" (UniqueName: \"kubernetes.io/projected/2ebb6843-0b32-449c-9b8f-d817c3690e67-kube-api-access-g2cff\") pod \"nova-cell1-2675-account-create-update-gp79n\" (UID: \"2ebb6843-0b32-449c-9b8f-d817c3690e67\") " pod="openstack/nova-cell1-2675-account-create-update-gp79n" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.541607 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2ebb6843-0b32-449c-9b8f-d817c3690e67-operator-scripts\") pod \"nova-cell1-2675-account-create-update-gp79n\" (UID: \"2ebb6843-0b32-449c-9b8f-d817c3690e67\") " pod="openstack/nova-cell1-2675-account-create-update-gp79n" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.559678 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2cff\" (UniqueName: \"kubernetes.io/projected/2ebb6843-0b32-449c-9b8f-d817c3690e67-kube-api-access-g2cff\") pod \"nova-cell1-2675-account-create-update-gp79n\" (UID: \"2ebb6843-0b32-449c-9b8f-d817c3690e67\") " pod="openstack/nova-cell1-2675-account-create-update-gp79n" Nov 28 16:33:53 crc kubenswrapper[4954]: I1128 16:33:53.625755 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-2675-account-create-update-gp79n" Nov 28 16:33:54 crc kubenswrapper[4954]: I1128 16:33:54.581329 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 16:33:54 crc kubenswrapper[4954]: I1128 16:33:54.883393 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:54 crc kubenswrapper[4954]: I1128 16:33:54.884696 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:33:57 crc kubenswrapper[4954]: I1128 16:33:57.217723 4954 generic.go:334] "Generic (PLEG): container finished" podID="7ee4f765-e243-4661-b118-dbf3414cfb2e" containerID="a0dd28b9942fb8fdc120a20870acce41f7cec85c79b45f3505224a17e9537cbd" exitCode=0 Nov 28 16:33:57 crc kubenswrapper[4954]: I1128 16:33:57.217793 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67c7f88868-224xn" event={"ID":"7ee4f765-e243-4661-b118-dbf3414cfb2e","Type":"ContainerDied","Data":"a0dd28b9942fb8fdc120a20870acce41f7cec85c79b45f3505224a17e9537cbd"} Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.205633 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.253769 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6354503c-292f-4372-866e-6a627c56c216","Type":"ContainerStarted","Data":"3b5d5fde27f231ec519243cda1d3ddd769a8b433bc89a3eee13c5121be99e7dc"} Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.256025 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"082f7647-b97a-4d57-83b1-ae69d61827b1","Type":"ContainerStarted","Data":"8f3b70dbbdb6e3dcdff478850bd55b90a960a4d6e61d20eb2e977c9634518dce"} Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.263728 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67c7f88868-224xn" event={"ID":"7ee4f765-e243-4661-b118-dbf3414cfb2e","Type":"ContainerDied","Data":"0a4b1d579fc826aa27964640df0e192ce8cba1c6c97f0f55b4d56c3bea9bcd84"} Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.263781 4954 scope.go:117] "RemoveContainer" containerID="03986f5ef00d106e9e4bf7e552bd6d38547e854ac711405aa68cf159ed4e9276" Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.263932 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-67c7f88868-224xn" Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.298603 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.394883202 podStartE2EDuration="19.298582437s" podCreationTimestamp="2025-11-28 16:33:40 +0000 UTC" firstStartedPulling="2025-11-28 16:33:41.746814586 +0000 UTC m=+1375.138483127" lastFinishedPulling="2025-11-28 16:33:58.650513811 +0000 UTC m=+1392.042182362" observedRunningTime="2025-11-28 16:33:59.277256323 +0000 UTC m=+1392.668924864" watchObservedRunningTime="2025-11-28 16:33:59.298582437 +0000 UTC m=+1392.690250988" Nov 28 16:33:59 crc kubenswrapper[4954]: W1128 16:33:59.339879 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd62a7c52_13eb_499e_acb2_8899ac6984f8.slice/crio-1817fb60d75199719c2452f46c946a54d568d281db6ab071e0d9f841a62f6949 WatchSource:0}: Error finding container 1817fb60d75199719c2452f46c946a54d568d281db6ab071e0d9f841a62f6949: Status 404 returned error can't find the container with id 1817fb60d75199719c2452f46c946a54d568d281db6ab071e0d9f841a62f6949 Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.339925 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f92b-account-create-update-j96hc"] Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.351603 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-2675-account-create-update-gp79n"] Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.359508 4954 scope.go:117] "RemoveContainer" containerID="a0dd28b9942fb8fdc120a20870acce41f7cec85c79b45f3505224a17e9537cbd" Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.363052 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-lpwkg"] Nov 28 16:33:59 crc kubenswrapper[4954]: W1128 16:33:59.368281 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ebb6843_0b32_449c_9b8f_d817c3690e67.slice/crio-18f415d0131d1fdb46c8da1edc18da40176e331194cd48eb6264109b2a7ee837 WatchSource:0}: Error finding container 18f415d0131d1fdb46c8da1edc18da40176e331194cd48eb6264109b2a7ee837: Status 404 returned error can't find the container with id 18f415d0131d1fdb46c8da1edc18da40176e331194cd48eb6264109b2a7ee837 Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.384335 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-config\") pod \"7ee4f765-e243-4661-b118-dbf3414cfb2e\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.384456 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb44n\" (UniqueName: \"kubernetes.io/projected/7ee4f765-e243-4661-b118-dbf3414cfb2e-kube-api-access-sb44n\") pod \"7ee4f765-e243-4661-b118-dbf3414cfb2e\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.384754 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-ovndb-tls-certs\") pod \"7ee4f765-e243-4661-b118-dbf3414cfb2e\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " Nov 28 16:33:59 crc 
kubenswrapper[4954]: I1128 16:33:59.384880 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-httpd-config\") pod \"7ee4f765-e243-4661-b118-dbf3414cfb2e\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.384995 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-combined-ca-bundle\") pod \"7ee4f765-e243-4661-b118-dbf3414cfb2e\" (UID: \"7ee4f765-e243-4661-b118-dbf3414cfb2e\") " Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.385084 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-dqk5s"] Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.395176 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ee4f765-e243-4661-b118-dbf3414cfb2e-kube-api-access-sb44n" (OuterVolumeSpecName: "kube-api-access-sb44n") pod "7ee4f765-e243-4661-b118-dbf3414cfb2e" (UID: "7ee4f765-e243-4661-b118-dbf3414cfb2e"). InnerVolumeSpecName "kube-api-access-sb44n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.400443 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "7ee4f765-e243-4661-b118-dbf3414cfb2e" (UID: "7ee4f765-e243-4661-b118-dbf3414cfb2e"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.474289 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7ee4f765-e243-4661-b118-dbf3414cfb2e" (UID: "7ee4f765-e243-4661-b118-dbf3414cfb2e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.488448 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.488478 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb44n\" (UniqueName: \"kubernetes.io/projected/7ee4f765-e243-4661-b118-dbf3414cfb2e-kube-api-access-sb44n\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.488488 4954 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.496546 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-528b-account-create-update-g72ls"] Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.502927 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "7ee4f765-e243-4661-b118-dbf3414cfb2e" (UID: "7ee4f765-e243-4661-b118-dbf3414cfb2e"). 
InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.519967 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-config" (OuterVolumeSpecName: "config") pod "7ee4f765-e243-4661-b118-dbf3414cfb2e" (UID: "7ee4f765-e243-4661-b118-dbf3414cfb2e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.590903 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.590936 4954 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ee4f765-e243-4661-b118-dbf3414cfb2e-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.592960 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-qtp2z"] Nov 28 16:33:59 crc kubenswrapper[4954]: W1128 16:33:59.619681 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddac05eb0_cb63_47c0_8542_0e33bc1d8963.slice/crio-77605ad6a4c3666dda5f056ab72d7c4ee1d207ce11cbde91ea444614103e29e5 WatchSource:0}: Error finding container 77605ad6a4c3666dda5f056ab72d7c4ee1d207ce11cbde91ea444614103e29e5: Status 404 returned error can't find the container with id 77605ad6a4c3666dda5f056ab72d7c4ee1d207ce11cbde91ea444614103e29e5 Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.620219 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-67c7f88868-224xn"] Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.663496 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-67c7f88868-224xn"] Nov 28 16:33:59 crc kubenswrapper[4954]: I1128 16:33:59.869663 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ee4f765-e243-4661-b118-dbf3414cfb2e" path="/var/lib/kubelet/pods/7ee4f765-e243-4661-b118-dbf3414cfb2e/volumes" Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.279364 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-528b-account-create-update-g72ls" event={"ID":"7b278f77-7514-4f2d-8db4-f9f209dc33dd","Type":"ContainerStarted","Data":"9790d31bac29981012dc4eb21b9dc17ee5a29b86264289bf2b4f88e66b611652"} Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.279705 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-528b-account-create-update-g72ls" event={"ID":"7b278f77-7514-4f2d-8db4-f9f209dc33dd","Type":"ContainerStarted","Data":"26a00caacc9e5cfb53dd422178d5e0de2d78475a2a4fde8e79d5449f16f9c003"} Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.286690 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-qtp2z" event={"ID":"dac05eb0-cb63-47c0-8542-0e33bc1d8963","Type":"ContainerStarted","Data":"b32195ea2b6bb28ee8316a1b9b4bf35d1d04d3b7ae1a048a81a1ccfdd4512e24"} Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.286746 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-qtp2z" 
event={"ID":"dac05eb0-cb63-47c0-8542-0e33bc1d8963","Type":"ContainerStarted","Data":"77605ad6a4c3666dda5f056ab72d7c4ee1d207ce11cbde91ea444614103e29e5"} Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.293180 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dqk5s" event={"ID":"9c07c750-8357-4ba2-a802-86a6f90ec496","Type":"ContainerStarted","Data":"14738597839ebf173901848778ebcbc2200b247d2d7cda2691a12638166bcddc"} Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.293227 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dqk5s" event={"ID":"9c07c750-8357-4ba2-a802-86a6f90ec496","Type":"ContainerStarted","Data":"a7b514ff7f87d39410f2dcff95f22d698419db044023f50575ef328894905d57"} Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.304883 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2675-account-create-update-gp79n" event={"ID":"2ebb6843-0b32-449c-9b8f-d817c3690e67","Type":"ContainerStarted","Data":"52deb0917a2ec0317872f4eeacd25fd59c248a2c1d2a87b43ad82bac7ff9de0b"} Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.304940 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2675-account-create-update-gp79n" event={"ID":"2ebb6843-0b32-449c-9b8f-d817c3690e67","Type":"ContainerStarted","Data":"18f415d0131d1fdb46c8da1edc18da40176e331194cd48eb6264109b2a7ee837"} Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.305399 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-528b-account-create-update-g72ls" podStartSLOduration=7.305389637 podStartE2EDuration="7.305389637s" podCreationTimestamp="2025-11-28 16:33:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:00.302292479 +0000 UTC m=+1393.693961020" watchObservedRunningTime="2025-11-28 16:34:00.305389637 +0000 UTC m=+1393.697058178" Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.317197 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f92b-account-create-update-j96hc" event={"ID":"d62a7c52-13eb-499e-acb2-8899ac6984f8","Type":"ContainerStarted","Data":"3fd64b82b333a2706f84c6fbd560eff95ef6f361783870f68150a1b52899e83e"} Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.317252 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f92b-account-create-update-j96hc" event={"ID":"d62a7c52-13eb-499e-acb2-8899ac6984f8","Type":"ContainerStarted","Data":"1817fb60d75199719c2452f46c946a54d568d281db6ab071e0d9f841a62f6949"} Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.320657 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-lpwkg" event={"ID":"ee22e0b0-27ab-4365-9ccc-6b563eb44b3a","Type":"ContainerStarted","Data":"009a5d1b2959b5c543787965c35edf048d0545676eeab27f33be2b63101529d1"} Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.320706 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-lpwkg" event={"ID":"ee22e0b0-27ab-4365-9ccc-6b563eb44b3a","Type":"ContainerStarted","Data":"28b931db958456eb02fcec7901b751e0e197c9c7b3e05cc68218e02c81d097c7"} Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.331678 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-dqk5s" podStartSLOduration=8.331661597 podStartE2EDuration="8.331661597s" 
podCreationTimestamp="2025-11-28 16:33:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:00.325004737 +0000 UTC m=+1393.716673288" watchObservedRunningTime="2025-11-28 16:34:00.331661597 +0000 UTC m=+1393.723330138" Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.354866 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-qtp2z" podStartSLOduration=8.354849479 podStartE2EDuration="8.354849479s" podCreationTimestamp="2025-11-28 16:33:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:00.351687319 +0000 UTC m=+1393.743355860" watchObservedRunningTime="2025-11-28 16:34:00.354849479 +0000 UTC m=+1393.746518020" Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.369300 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-2675-account-create-update-gp79n" podStartSLOduration=7.369285405 podStartE2EDuration="7.369285405s" podCreationTimestamp="2025-11-28 16:33:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:00.368690376 +0000 UTC m=+1393.760358917" watchObservedRunningTime="2025-11-28 16:34:00.369285405 +0000 UTC m=+1393.760953946" Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.392351 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-lpwkg" podStartSLOduration=8.392334484 podStartE2EDuration="8.392334484s" podCreationTimestamp="2025-11-28 16:33:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:00.387996746 +0000 UTC m=+1393.779665287" watchObservedRunningTime="2025-11-28 16:34:00.392334484 +0000 UTC m=+1393.784003025" Nov 28 16:34:00 crc kubenswrapper[4954]: I1128 16:34:00.412034 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-f92b-account-create-update-j96hc" podStartSLOduration=8.412017946 podStartE2EDuration="8.412017946s" podCreationTimestamp="2025-11-28 16:33:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:00.402481234 +0000 UTC m=+1393.794149775" watchObservedRunningTime="2025-11-28 16:34:00.412017946 +0000 UTC m=+1393.803686487" Nov 28 16:34:01 crc kubenswrapper[4954]: I1128 16:34:01.335670 4954 generic.go:334] "Generic (PLEG): container finished" podID="ee22e0b0-27ab-4365-9ccc-6b563eb44b3a" containerID="009a5d1b2959b5c543787965c35edf048d0545676eeab27f33be2b63101529d1" exitCode=0 Nov 28 16:34:01 crc kubenswrapper[4954]: I1128 16:34:01.335775 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-lpwkg" event={"ID":"ee22e0b0-27ab-4365-9ccc-6b563eb44b3a","Type":"ContainerDied","Data":"009a5d1b2959b5c543787965c35edf048d0545676eeab27f33be2b63101529d1"} Nov 28 16:34:01 crc kubenswrapper[4954]: I1128 16:34:01.338690 4954 generic.go:334] "Generic (PLEG): container finished" podID="7b278f77-7514-4f2d-8db4-f9f209dc33dd" containerID="9790d31bac29981012dc4eb21b9dc17ee5a29b86264289bf2b4f88e66b611652" exitCode=0 Nov 28 16:34:01 crc kubenswrapper[4954]: I1128 16:34:01.338799 4954 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/nova-cell0-528b-account-create-update-g72ls" event={"ID":"7b278f77-7514-4f2d-8db4-f9f209dc33dd","Type":"ContainerDied","Data":"9790d31bac29981012dc4eb21b9dc17ee5a29b86264289bf2b4f88e66b611652"} Nov 28 16:34:01 crc kubenswrapper[4954]: I1128 16:34:01.341871 4954 generic.go:334] "Generic (PLEG): container finished" podID="dac05eb0-cb63-47c0-8542-0e33bc1d8963" containerID="b32195ea2b6bb28ee8316a1b9b4bf35d1d04d3b7ae1a048a81a1ccfdd4512e24" exitCode=0 Nov 28 16:34:01 crc kubenswrapper[4954]: I1128 16:34:01.341930 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-qtp2z" event={"ID":"dac05eb0-cb63-47c0-8542-0e33bc1d8963","Type":"ContainerDied","Data":"b32195ea2b6bb28ee8316a1b9b4bf35d1d04d3b7ae1a048a81a1ccfdd4512e24"} Nov 28 16:34:01 crc kubenswrapper[4954]: I1128 16:34:01.349926 4954 generic.go:334] "Generic (PLEG): container finished" podID="9c07c750-8357-4ba2-a802-86a6f90ec496" containerID="14738597839ebf173901848778ebcbc2200b247d2d7cda2691a12638166bcddc" exitCode=0 Nov 28 16:34:01 crc kubenswrapper[4954]: I1128 16:34:01.350011 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dqk5s" event={"ID":"9c07c750-8357-4ba2-a802-86a6f90ec496","Type":"ContainerDied","Data":"14738597839ebf173901848778ebcbc2200b247d2d7cda2691a12638166bcddc"} Nov 28 16:34:01 crc kubenswrapper[4954]: I1128 16:34:01.353112 4954 generic.go:334] "Generic (PLEG): container finished" podID="2ebb6843-0b32-449c-9b8f-d817c3690e67" containerID="52deb0917a2ec0317872f4eeacd25fd59c248a2c1d2a87b43ad82bac7ff9de0b" exitCode=0 Nov 28 16:34:01 crc kubenswrapper[4954]: I1128 16:34:01.353167 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2675-account-create-update-gp79n" event={"ID":"2ebb6843-0b32-449c-9b8f-d817c3690e67","Type":"ContainerDied","Data":"52deb0917a2ec0317872f4eeacd25fd59c248a2c1d2a87b43ad82bac7ff9de0b"} Nov 28 16:34:01 crc kubenswrapper[4954]: I1128 16:34:01.360220 4954 generic.go:334] "Generic (PLEG): container finished" podID="d62a7c52-13eb-499e-acb2-8899ac6984f8" containerID="3fd64b82b333a2706f84c6fbd560eff95ef6f361783870f68150a1b52899e83e" exitCode=0 Nov 28 16:34:01 crc kubenswrapper[4954]: I1128 16:34:01.360270 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f92b-account-create-update-j96hc" event={"ID":"d62a7c52-13eb-499e-acb2-8899ac6984f8","Type":"ContainerDied","Data":"3fd64b82b333a2706f84c6fbd560eff95ef6f361783870f68150a1b52899e83e"} Nov 28 16:34:02 crc kubenswrapper[4954]: I1128 16:34:02.372424 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6354503c-292f-4372-866e-6a627c56c216","Type":"ContainerStarted","Data":"dd861baba89b530fcd7633432d74954d3f22083630d3a619809128eaa8d58384"} Nov 28 16:34:02 crc kubenswrapper[4954]: I1128 16:34:02.480775 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:34:02 crc kubenswrapper[4954]: I1128 16:34:02.480837 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection 
refused" Nov 28 16:34:02 crc kubenswrapper[4954]: I1128 16:34:02.480886 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:34:02 crc kubenswrapper[4954]: I1128 16:34:02.481742 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"63d17d14b7b387446e6790d2542f732de1df551f140ddd4727c4084d5078e4ad"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:34:02 crc kubenswrapper[4954]: I1128 16:34:02.481798 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://63d17d14b7b387446e6790d2542f732de1df551f140ddd4727c4084d5078e4ad" gracePeriod=600 Nov 28 16:34:02 crc kubenswrapper[4954]: I1128 16:34:02.918518 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-2675-account-create-update-gp79n" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.077869 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g2cff\" (UniqueName: \"kubernetes.io/projected/2ebb6843-0b32-449c-9b8f-d817c3690e67-kube-api-access-g2cff\") pod \"2ebb6843-0b32-449c-9b8f-d817c3690e67\" (UID: \"2ebb6843-0b32-449c-9b8f-d817c3690e67\") " Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.078094 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2ebb6843-0b32-449c-9b8f-d817c3690e67-operator-scripts\") pod \"2ebb6843-0b32-449c-9b8f-d817c3690e67\" (UID: \"2ebb6843-0b32-449c-9b8f-d817c3690e67\") " Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.079714 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ebb6843-0b32-449c-9b8f-d817c3690e67-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2ebb6843-0b32-449c-9b8f-d817c3690e67" (UID: "2ebb6843-0b32-449c-9b8f-d817c3690e67"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.085788 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ebb6843-0b32-449c-9b8f-d817c3690e67-kube-api-access-g2cff" (OuterVolumeSpecName: "kube-api-access-g2cff") pod "2ebb6843-0b32-449c-9b8f-d817c3690e67" (UID: "2ebb6843-0b32-449c-9b8f-d817c3690e67"). InnerVolumeSpecName "kube-api-access-g2cff". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.128195 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-528b-account-create-update-g72ls" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.145859 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-qtp2z" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.157168 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-lpwkg" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.180013 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2ebb6843-0b32-449c-9b8f-d817c3690e67-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.180327 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g2cff\" (UniqueName: \"kubernetes.io/projected/2ebb6843-0b32-449c-9b8f-d817c3690e67-kube-api-access-g2cff\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.181777 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dqk5s" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.182315 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f92b-account-create-update-j96hc" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.280955 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dac05eb0-cb63-47c0-8542-0e33bc1d8963-operator-scripts\") pod \"dac05eb0-cb63-47c0-8542-0e33bc1d8963\" (UID: \"dac05eb0-cb63-47c0-8542-0e33bc1d8963\") " Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.281031 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nf925\" (UniqueName: \"kubernetes.io/projected/9c07c750-8357-4ba2-a802-86a6f90ec496-kube-api-access-nf925\") pod \"9c07c750-8357-4ba2-a802-86a6f90ec496\" (UID: \"9c07c750-8357-4ba2-a802-86a6f90ec496\") " Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.281059 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d62a7c52-13eb-499e-acb2-8899ac6984f8-operator-scripts\") pod \"d62a7c52-13eb-499e-acb2-8899ac6984f8\" (UID: \"d62a7c52-13eb-499e-acb2-8899ac6984f8\") " Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.281084 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b278f77-7514-4f2d-8db4-f9f209dc33dd-operator-scripts\") pod \"7b278f77-7514-4f2d-8db4-f9f209dc33dd\" (UID: \"7b278f77-7514-4f2d-8db4-f9f209dc33dd\") " Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.281163 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee22e0b0-27ab-4365-9ccc-6b563eb44b3a-operator-scripts\") pod \"ee22e0b0-27ab-4365-9ccc-6b563eb44b3a\" (UID: \"ee22e0b0-27ab-4365-9ccc-6b563eb44b3a\") " Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.281207 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6gfv\" (UniqueName: \"kubernetes.io/projected/7b278f77-7514-4f2d-8db4-f9f209dc33dd-kube-api-access-j6gfv\") pod \"7b278f77-7514-4f2d-8db4-f9f209dc33dd\" (UID: \"7b278f77-7514-4f2d-8db4-f9f209dc33dd\") " Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.281228 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5bqk\" (UniqueName: \"kubernetes.io/projected/dac05eb0-cb63-47c0-8542-0e33bc1d8963-kube-api-access-z5bqk\") pod \"dac05eb0-cb63-47c0-8542-0e33bc1d8963\" (UID: \"dac05eb0-cb63-47c0-8542-0e33bc1d8963\") " Nov 
28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.281286 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ww5tf\" (UniqueName: \"kubernetes.io/projected/d62a7c52-13eb-499e-acb2-8899ac6984f8-kube-api-access-ww5tf\") pod \"d62a7c52-13eb-499e-acb2-8899ac6984f8\" (UID: \"d62a7c52-13eb-499e-acb2-8899ac6984f8\") " Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.281325 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dvksg\" (UniqueName: \"kubernetes.io/projected/ee22e0b0-27ab-4365-9ccc-6b563eb44b3a-kube-api-access-dvksg\") pod \"ee22e0b0-27ab-4365-9ccc-6b563eb44b3a\" (UID: \"ee22e0b0-27ab-4365-9ccc-6b563eb44b3a\") " Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.281357 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c07c750-8357-4ba2-a802-86a6f90ec496-operator-scripts\") pod \"9c07c750-8357-4ba2-a802-86a6f90ec496\" (UID: \"9c07c750-8357-4ba2-a802-86a6f90ec496\") " Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.282458 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d62a7c52-13eb-499e-acb2-8899ac6984f8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d62a7c52-13eb-499e-acb2-8899ac6984f8" (UID: "d62a7c52-13eb-499e-acb2-8899ac6984f8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.282897 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dac05eb0-cb63-47c0-8542-0e33bc1d8963-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dac05eb0-cb63-47c0-8542-0e33bc1d8963" (UID: "dac05eb0-cb63-47c0-8542-0e33bc1d8963"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.282909 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b278f77-7514-4f2d-8db4-f9f209dc33dd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7b278f77-7514-4f2d-8db4-f9f209dc33dd" (UID: "7b278f77-7514-4f2d-8db4-f9f209dc33dd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.283030 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee22e0b0-27ab-4365-9ccc-6b563eb44b3a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ee22e0b0-27ab-4365-9ccc-6b563eb44b3a" (UID: "ee22e0b0-27ab-4365-9ccc-6b563eb44b3a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.283669 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c07c750-8357-4ba2-a802-86a6f90ec496-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9c07c750-8357-4ba2-a802-86a6f90ec496" (UID: "9c07c750-8357-4ba2-a802-86a6f90ec496"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.286759 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c07c750-8357-4ba2-a802-86a6f90ec496-kube-api-access-nf925" (OuterVolumeSpecName: "kube-api-access-nf925") pod "9c07c750-8357-4ba2-a802-86a6f90ec496" (UID: "9c07c750-8357-4ba2-a802-86a6f90ec496"). InnerVolumeSpecName "kube-api-access-nf925". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.287232 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b278f77-7514-4f2d-8db4-f9f209dc33dd-kube-api-access-j6gfv" (OuterVolumeSpecName: "kube-api-access-j6gfv") pod "7b278f77-7514-4f2d-8db4-f9f209dc33dd" (UID: "7b278f77-7514-4f2d-8db4-f9f209dc33dd"). InnerVolumeSpecName "kube-api-access-j6gfv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.287859 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee22e0b0-27ab-4365-9ccc-6b563eb44b3a-kube-api-access-dvksg" (OuterVolumeSpecName: "kube-api-access-dvksg") pod "ee22e0b0-27ab-4365-9ccc-6b563eb44b3a" (UID: "ee22e0b0-27ab-4365-9ccc-6b563eb44b3a"). InnerVolumeSpecName "kube-api-access-dvksg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.290610 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d62a7c52-13eb-499e-acb2-8899ac6984f8-kube-api-access-ww5tf" (OuterVolumeSpecName: "kube-api-access-ww5tf") pod "d62a7c52-13eb-499e-acb2-8899ac6984f8" (UID: "d62a7c52-13eb-499e-acb2-8899ac6984f8"). InnerVolumeSpecName "kube-api-access-ww5tf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.296429 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dac05eb0-cb63-47c0-8542-0e33bc1d8963-kube-api-access-z5bqk" (OuterVolumeSpecName: "kube-api-access-z5bqk") pod "dac05eb0-cb63-47c0-8542-0e33bc1d8963" (UID: "dac05eb0-cb63-47c0-8542-0e33bc1d8963"). InnerVolumeSpecName "kube-api-access-z5bqk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.383469 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2675-account-create-update-gp79n" event={"ID":"2ebb6843-0b32-449c-9b8f-d817c3690e67","Type":"ContainerDied","Data":"18f415d0131d1fdb46c8da1edc18da40176e331194cd48eb6264109b2a7ee837"} Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.383515 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="18f415d0131d1fdb46c8da1edc18da40176e331194cd48eb6264109b2a7ee837" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.383598 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-2675-account-create-update-gp79n" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.384849 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dac05eb0-cb63-47c0-8542-0e33bc1d8963-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.384872 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nf925\" (UniqueName: \"kubernetes.io/projected/9c07c750-8357-4ba2-a802-86a6f90ec496-kube-api-access-nf925\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.384884 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d62a7c52-13eb-499e-acb2-8899ac6984f8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.384898 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b278f77-7514-4f2d-8db4-f9f209dc33dd-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.384908 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee22e0b0-27ab-4365-9ccc-6b563eb44b3a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.384919 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6gfv\" (UniqueName: \"kubernetes.io/projected/7b278f77-7514-4f2d-8db4-f9f209dc33dd-kube-api-access-j6gfv\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.384929 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5bqk\" (UniqueName: \"kubernetes.io/projected/dac05eb0-cb63-47c0-8542-0e33bc1d8963-kube-api-access-z5bqk\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.384942 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ww5tf\" (UniqueName: \"kubernetes.io/projected/d62a7c52-13eb-499e-acb2-8899ac6984f8-kube-api-access-ww5tf\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.384953 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dvksg\" (UniqueName: \"kubernetes.io/projected/ee22e0b0-27ab-4365-9ccc-6b563eb44b3a-kube-api-access-dvksg\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.384965 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c07c750-8357-4ba2-a802-86a6f90ec496-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.386463 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-f92b-account-create-update-j96hc" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.390594 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f92b-account-create-update-j96hc" event={"ID":"d62a7c52-13eb-499e-acb2-8899ac6984f8","Type":"ContainerDied","Data":"1817fb60d75199719c2452f46c946a54d568d281db6ab071e0d9f841a62f6949"} Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.390641 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1817fb60d75199719c2452f46c946a54d568d281db6ab071e0d9f841a62f6949" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.400839 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="63d17d14b7b387446e6790d2542f732de1df551f140ddd4727c4084d5078e4ad" exitCode=0 Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.400916 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"63d17d14b7b387446e6790d2542f732de1df551f140ddd4727c4084d5078e4ad"} Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.400960 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352"} Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.400978 4954 scope.go:117] "RemoveContainer" containerID="01dcf9beb4ee4352c09c120b16a87c14a68604f09bcc07e4fe16753903887aac" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.409713 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-lpwkg" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.409705 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-lpwkg" event={"ID":"ee22e0b0-27ab-4365-9ccc-6b563eb44b3a","Type":"ContainerDied","Data":"28b931db958456eb02fcec7901b751e0e197c9c7b3e05cc68218e02c81d097c7"} Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.409977 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="28b931db958456eb02fcec7901b751e0e197c9c7b3e05cc68218e02c81d097c7" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.421784 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-528b-account-create-update-g72ls" event={"ID":"7b278f77-7514-4f2d-8db4-f9f209dc33dd","Type":"ContainerDied","Data":"26a00caacc9e5cfb53dd422178d5e0de2d78475a2a4fde8e79d5449f16f9c003"} Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.421833 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="26a00caacc9e5cfb53dd422178d5e0de2d78475a2a4fde8e79d5449f16f9c003" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.421907 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-528b-account-create-update-g72ls" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.444963 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6354503c-292f-4372-866e-6a627c56c216","Type":"ContainerStarted","Data":"92fd4997ee2b111d43f9b782f5b8868f27759f4c0135ee2517eb6b09cc16b51d"} Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.448762 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-qtp2z" event={"ID":"dac05eb0-cb63-47c0-8542-0e33bc1d8963","Type":"ContainerDied","Data":"77605ad6a4c3666dda5f056ab72d7c4ee1d207ce11cbde91ea444614103e29e5"} Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.449031 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="77605ad6a4c3666dda5f056ab72d7c4ee1d207ce11cbde91ea444614103e29e5" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.449084 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-qtp2z" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.453893 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dqk5s" event={"ID":"9c07c750-8357-4ba2-a802-86a6f90ec496","Type":"ContainerDied","Data":"a7b514ff7f87d39410f2dcff95f22d698419db044023f50575ef328894905d57"} Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.453929 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a7b514ff7f87d39410f2dcff95f22d698419db044023f50575ef328894905d57" Nov 28 16:34:03 crc kubenswrapper[4954]: I1128 16:34:03.453981 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dqk5s" Nov 28 16:34:05 crc kubenswrapper[4954]: I1128 16:34:05.478354 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6354503c-292f-4372-866e-6a627c56c216","Type":"ContainerStarted","Data":"28b0c6fefb2c4d8bb10c802d05dfc01589b7044c91a2edf3fd50297960321c66"} Nov 28 16:34:05 crc kubenswrapper[4954]: I1128 16:34:05.479876 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 16:34:05 crc kubenswrapper[4954]: I1128 16:34:05.478646 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6354503c-292f-4372-866e-6a627c56c216" containerName="sg-core" containerID="cri-o://92fd4997ee2b111d43f9b782f5b8868f27759f4c0135ee2517eb6b09cc16b51d" gracePeriod=30 Nov 28 16:34:05 crc kubenswrapper[4954]: I1128 16:34:05.478556 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6354503c-292f-4372-866e-6a627c56c216" containerName="ceilometer-central-agent" containerID="cri-o://3b5d5fde27f231ec519243cda1d3ddd769a8b433bc89a3eee13c5121be99e7dc" gracePeriod=30 Nov 28 16:34:05 crc kubenswrapper[4954]: I1128 16:34:05.478720 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6354503c-292f-4372-866e-6a627c56c216" containerName="ceilometer-notification-agent" containerID="cri-o://dd861baba89b530fcd7633432d74954d3f22083630d3a619809128eaa8d58384" gracePeriod=30 Nov 28 16:34:05 crc kubenswrapper[4954]: I1128 16:34:05.478704 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6354503c-292f-4372-866e-6a627c56c216" 
containerName="proxy-httpd" containerID="cri-o://28b0c6fefb2c4d8bb10c802d05dfc01589b7044c91a2edf3fd50297960321c66" gracePeriod=30 Nov 28 16:34:05 crc kubenswrapper[4954]: I1128 16:34:05.523597 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.873863517 podStartE2EDuration="18.523576353s" podCreationTimestamp="2025-11-28 16:33:47 +0000 UTC" firstStartedPulling="2025-11-28 16:33:48.849914176 +0000 UTC m=+1382.241582717" lastFinishedPulling="2025-11-28 16:34:04.499627012 +0000 UTC m=+1397.891295553" observedRunningTime="2025-11-28 16:34:05.506291627 +0000 UTC m=+1398.897960168" watchObservedRunningTime="2025-11-28 16:34:05.523576353 +0000 UTC m=+1398.915244904" Nov 28 16:34:06 crc kubenswrapper[4954]: I1128 16:34:06.494113 4954 generic.go:334] "Generic (PLEG): container finished" podID="6354503c-292f-4372-866e-6a627c56c216" containerID="28b0c6fefb2c4d8bb10c802d05dfc01589b7044c91a2edf3fd50297960321c66" exitCode=0 Nov 28 16:34:06 crc kubenswrapper[4954]: I1128 16:34:06.494145 4954 generic.go:334] "Generic (PLEG): container finished" podID="6354503c-292f-4372-866e-6a627c56c216" containerID="92fd4997ee2b111d43f9b782f5b8868f27759f4c0135ee2517eb6b09cc16b51d" exitCode=2 Nov 28 16:34:06 crc kubenswrapper[4954]: I1128 16:34:06.494155 4954 generic.go:334] "Generic (PLEG): container finished" podID="6354503c-292f-4372-866e-6a627c56c216" containerID="dd861baba89b530fcd7633432d74954d3f22083630d3a619809128eaa8d58384" exitCode=0 Nov 28 16:34:06 crc kubenswrapper[4954]: I1128 16:34:06.494177 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6354503c-292f-4372-866e-6a627c56c216","Type":"ContainerDied","Data":"28b0c6fefb2c4d8bb10c802d05dfc01589b7044c91a2edf3fd50297960321c66"} Nov 28 16:34:06 crc kubenswrapper[4954]: I1128 16:34:06.494206 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6354503c-292f-4372-866e-6a627c56c216","Type":"ContainerDied","Data":"92fd4997ee2b111d43f9b782f5b8868f27759f4c0135ee2517eb6b09cc16b51d"} Nov 28 16:34:06 crc kubenswrapper[4954]: I1128 16:34:06.494218 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6354503c-292f-4372-866e-6a627c56c216","Type":"ContainerDied","Data":"dd861baba89b530fcd7633432d74954d3f22083630d3a619809128eaa8d58384"} Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.505839 4954 generic.go:334] "Generic (PLEG): container finished" podID="6354503c-292f-4372-866e-6a627c56c216" containerID="3b5d5fde27f231ec519243cda1d3ddd769a8b433bc89a3eee13c5121be99e7dc" exitCode=0 Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.506103 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6354503c-292f-4372-866e-6a627c56c216","Type":"ContainerDied","Data":"3b5d5fde27f231ec519243cda1d3ddd769a8b433bc89a3eee13c5121be99e7dc"} Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.651377 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.764672 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-sg-core-conf-yaml\") pod \"6354503c-292f-4372-866e-6a627c56c216\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.764731 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pzv2r\" (UniqueName: \"kubernetes.io/projected/6354503c-292f-4372-866e-6a627c56c216-kube-api-access-pzv2r\") pod \"6354503c-292f-4372-866e-6a627c56c216\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.764846 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-scripts\") pod \"6354503c-292f-4372-866e-6a627c56c216\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.765602 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-combined-ca-bundle\") pod \"6354503c-292f-4372-866e-6a627c56c216\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.765672 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6354503c-292f-4372-866e-6a627c56c216-log-httpd\") pod \"6354503c-292f-4372-866e-6a627c56c216\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.765695 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-config-data\") pod \"6354503c-292f-4372-866e-6a627c56c216\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.765755 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6354503c-292f-4372-866e-6a627c56c216-run-httpd\") pod \"6354503c-292f-4372-866e-6a627c56c216\" (UID: \"6354503c-292f-4372-866e-6a627c56c216\") " Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.766397 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6354503c-292f-4372-866e-6a627c56c216-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6354503c-292f-4372-866e-6a627c56c216" (UID: "6354503c-292f-4372-866e-6a627c56c216"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.766590 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6354503c-292f-4372-866e-6a627c56c216-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6354503c-292f-4372-866e-6a627c56c216" (UID: "6354503c-292f-4372-866e-6a627c56c216"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.771703 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-scripts" (OuterVolumeSpecName: "scripts") pod "6354503c-292f-4372-866e-6a627c56c216" (UID: "6354503c-292f-4372-866e-6a627c56c216"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.773201 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6354503c-292f-4372-866e-6a627c56c216-kube-api-access-pzv2r" (OuterVolumeSpecName: "kube-api-access-pzv2r") pod "6354503c-292f-4372-866e-6a627c56c216" (UID: "6354503c-292f-4372-866e-6a627c56c216"). InnerVolumeSpecName "kube-api-access-pzv2r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.804642 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6354503c-292f-4372-866e-6a627c56c216" (UID: "6354503c-292f-4372-866e-6a627c56c216"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.867228 4954 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.867265 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pzv2r\" (UniqueName: \"kubernetes.io/projected/6354503c-292f-4372-866e-6a627c56c216-kube-api-access-pzv2r\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.867280 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.867291 4954 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6354503c-292f-4372-866e-6a627c56c216-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.867301 4954 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6354503c-292f-4372-866e-6a627c56c216-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.868716 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6354503c-292f-4372-866e-6a627c56c216" (UID: "6354503c-292f-4372-866e-6a627c56c216"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.902609 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-config-data" (OuterVolumeSpecName: "config-data") pod "6354503c-292f-4372-866e-6a627c56c216" (UID: "6354503c-292f-4372-866e-6a627c56c216"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.969414 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:07 crc kubenswrapper[4954]: I1128 16:34:07.969449 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6354503c-292f-4372-866e-6a627c56c216-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.341822 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-twcpm"] Nov 28 16:34:08 crc kubenswrapper[4954]: E1128 16:34:08.342614 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ebb6843-0b32-449c-9b8f-d817c3690e67" containerName="mariadb-account-create-update" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.342736 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ebb6843-0b32-449c-9b8f-d817c3690e67" containerName="mariadb-account-create-update" Nov 28 16:34:08 crc kubenswrapper[4954]: E1128 16:34:08.342819 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c07c750-8357-4ba2-a802-86a6f90ec496" containerName="mariadb-database-create" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.342887 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c07c750-8357-4ba2-a802-86a6f90ec496" containerName="mariadb-database-create" Nov 28 16:34:08 crc kubenswrapper[4954]: E1128 16:34:08.342962 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dac05eb0-cb63-47c0-8542-0e33bc1d8963" containerName="mariadb-database-create" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.343024 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="dac05eb0-cb63-47c0-8542-0e33bc1d8963" containerName="mariadb-database-create" Nov 28 16:34:08 crc kubenswrapper[4954]: E1128 16:34:08.343093 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6354503c-292f-4372-866e-6a627c56c216" containerName="proxy-httpd" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.343167 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6354503c-292f-4372-866e-6a627c56c216" containerName="proxy-httpd" Nov 28 16:34:08 crc kubenswrapper[4954]: E1128 16:34:08.343244 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6354503c-292f-4372-866e-6a627c56c216" containerName="ceilometer-notification-agent" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.343325 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6354503c-292f-4372-866e-6a627c56c216" containerName="ceilometer-notification-agent" Nov 28 16:34:08 crc kubenswrapper[4954]: E1128 16:34:08.343402 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b278f77-7514-4f2d-8db4-f9f209dc33dd" containerName="mariadb-account-create-update" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.343469 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b278f77-7514-4f2d-8db4-f9f209dc33dd" containerName="mariadb-account-create-update" Nov 28 16:34:08 crc kubenswrapper[4954]: E1128 16:34:08.343571 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6354503c-292f-4372-866e-6a627c56c216" containerName="ceilometer-central-agent" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.343663 4954 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="6354503c-292f-4372-866e-6a627c56c216" containerName="ceilometer-central-agent" Nov 28 16:34:08 crc kubenswrapper[4954]: E1128 16:34:08.343750 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ee4f765-e243-4661-b118-dbf3414cfb2e" containerName="neutron-api" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.343833 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ee4f765-e243-4661-b118-dbf3414cfb2e" containerName="neutron-api" Nov 28 16:34:08 crc kubenswrapper[4954]: E1128 16:34:08.343911 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6354503c-292f-4372-866e-6a627c56c216" containerName="sg-core" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.343978 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6354503c-292f-4372-866e-6a627c56c216" containerName="sg-core" Nov 28 16:34:08 crc kubenswrapper[4954]: E1128 16:34:08.344051 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d62a7c52-13eb-499e-acb2-8899ac6984f8" containerName="mariadb-account-create-update" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.344130 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d62a7c52-13eb-499e-acb2-8899ac6984f8" containerName="mariadb-account-create-update" Nov 28 16:34:08 crc kubenswrapper[4954]: E1128 16:34:08.344213 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ee4f765-e243-4661-b118-dbf3414cfb2e" containerName="neutron-httpd" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.344278 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ee4f765-e243-4661-b118-dbf3414cfb2e" containerName="neutron-httpd" Nov 28 16:34:08 crc kubenswrapper[4954]: E1128 16:34:08.344352 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee22e0b0-27ab-4365-9ccc-6b563eb44b3a" containerName="mariadb-database-create" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.344413 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee22e0b0-27ab-4365-9ccc-6b563eb44b3a" containerName="mariadb-database-create" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.344759 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ebb6843-0b32-449c-9b8f-d817c3690e67" containerName="mariadb-account-create-update" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.344852 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c07c750-8357-4ba2-a802-86a6f90ec496" containerName="mariadb-database-create" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.344940 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ee4f765-e243-4661-b118-dbf3414cfb2e" containerName="neutron-api" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.345019 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="d62a7c52-13eb-499e-acb2-8899ac6984f8" containerName="mariadb-account-create-update" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.345092 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b278f77-7514-4f2d-8db4-f9f209dc33dd" containerName="mariadb-account-create-update" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.345161 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="dac05eb0-cb63-47c0-8542-0e33bc1d8963" containerName="mariadb-database-create" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.345245 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6354503c-292f-4372-866e-6a627c56c216" 
containerName="sg-core" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.345321 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6354503c-292f-4372-866e-6a627c56c216" containerName="ceilometer-central-agent" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.345391 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee22e0b0-27ab-4365-9ccc-6b563eb44b3a" containerName="mariadb-database-create" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.345464 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ee4f765-e243-4661-b118-dbf3414cfb2e" containerName="neutron-httpd" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.345540 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6354503c-292f-4372-866e-6a627c56c216" containerName="ceilometer-notification-agent" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.345636 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6354503c-292f-4372-866e-6a627c56c216" containerName="proxy-httpd" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.346525 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.352582 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.352896 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-4nkxs" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.353124 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.354112 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-twcpm"] Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.477843 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-config-data\") pod \"nova-cell0-conductor-db-sync-twcpm\" (UID: \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.477936 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-scripts\") pod \"nova-cell0-conductor-db-sync-twcpm\" (UID: \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.478008 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-twcpm\" (UID: \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.478031 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nj2lv\" (UniqueName: \"kubernetes.io/projected/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-kube-api-access-nj2lv\") pod \"nova-cell0-conductor-db-sync-twcpm\" (UID: 
\"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.521712 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6354503c-292f-4372-866e-6a627c56c216","Type":"ContainerDied","Data":"bdf81a473c1f178c24e735cdd087c3d73185fe808434af43ca21e876314ee182"} Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.521768 4954 scope.go:117] "RemoveContainer" containerID="28b0c6fefb2c4d8bb10c802d05dfc01589b7044c91a2edf3fd50297960321c66" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.521913 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.551721 4954 scope.go:117] "RemoveContainer" containerID="92fd4997ee2b111d43f9b782f5b8868f27759f4c0135ee2517eb6b09cc16b51d" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.565692 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.578622 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.579705 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-twcpm\" (UID: \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.579765 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nj2lv\" (UniqueName: \"kubernetes.io/projected/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-kube-api-access-nj2lv\") pod \"nova-cell0-conductor-db-sync-twcpm\" (UID: \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.579879 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-config-data\") pod \"nova-cell0-conductor-db-sync-twcpm\" (UID: \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.579973 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-scripts\") pod \"nova-cell0-conductor-db-sync-twcpm\" (UID: \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.591272 4954 scope.go:117] "RemoveContainer" containerID="dd861baba89b530fcd7633432d74954d3f22083630d3a619809128eaa8d58384" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.593463 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-twcpm\" (UID: \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.601111 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-config-data\") pod \"nova-cell0-conductor-db-sync-twcpm\" (UID: \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.601566 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-scripts\") pod \"nova-cell0-conductor-db-sync-twcpm\" (UID: \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.609074 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nj2lv\" (UniqueName: \"kubernetes.io/projected/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-kube-api-access-nj2lv\") pod \"nova-cell0-conductor-db-sync-twcpm\" (UID: \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.618346 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.622458 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.625250 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.625380 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.626532 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.679965 4954 scope.go:117] "RemoveContainer" containerID="3b5d5fde27f231ec519243cda1d3ddd769a8b433bc89a3eee13c5121be99e7dc" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.694013 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.784642 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.784759 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/018470bf-5b43-46ea-a83c-6742103b947f-log-httpd\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.784836 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-scripts\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.785026 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/018470bf-5b43-46ea-a83c-6742103b947f-run-httpd\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.785236 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-config-data\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.785305 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.785394 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7pcw\" (UniqueName: \"kubernetes.io/projected/018470bf-5b43-46ea-a83c-6742103b947f-kube-api-access-h7pcw\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.886782 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.886849 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/018470bf-5b43-46ea-a83c-6742103b947f-log-httpd\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.886885 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-scripts\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.886973 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/018470bf-5b43-46ea-a83c-6742103b947f-run-httpd\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.887010 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-config-data\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.887031 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.887052 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7pcw\" (UniqueName: \"kubernetes.io/projected/018470bf-5b43-46ea-a83c-6742103b947f-kube-api-access-h7pcw\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.887648 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/018470bf-5b43-46ea-a83c-6742103b947f-log-httpd\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.887707 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/018470bf-5b43-46ea-a83c-6742103b947f-run-httpd\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.892779 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-config-data\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.893384 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-scripts\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.894342 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.895591 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.905115 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7pcw\" (UniqueName: \"kubernetes.io/projected/018470bf-5b43-46ea-a83c-6742103b947f-kube-api-access-h7pcw\") pod \"ceilometer-0\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " pod="openstack/ceilometer-0" Nov 28 16:34:08 crc kubenswrapper[4954]: I1128 16:34:08.985951 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:09 crc kubenswrapper[4954]: W1128 16:34:09.146508 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c6ac4f8_3168_46d6_9adc_d6ea0b2f3780.slice/crio-1bdeebf37dce6be8d3d7d887015e6705acb3353d16443b69c3b934bcb154c60b WatchSource:0}: Error finding container 1bdeebf37dce6be8d3d7d887015e6705acb3353d16443b69c3b934bcb154c60b: Status 404 returned error can't find the container with id 1bdeebf37dce6be8d3d7d887015e6705acb3353d16443b69c3b934bcb154c60b Nov 28 16:34:09 crc kubenswrapper[4954]: I1128 16:34:09.148397 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-twcpm"] Nov 28 16:34:09 crc kubenswrapper[4954]: I1128 16:34:09.426022 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:09 crc kubenswrapper[4954]: I1128 16:34:09.533920 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-twcpm" event={"ID":"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780","Type":"ContainerStarted","Data":"1bdeebf37dce6be8d3d7d887015e6705acb3353d16443b69c3b934bcb154c60b"} Nov 28 16:34:09 crc kubenswrapper[4954]: I1128 16:34:09.535158 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"018470bf-5b43-46ea-a83c-6742103b947f","Type":"ContainerStarted","Data":"ecbe49316f61478271e2a2f71bc79aa9547174d677c3b438ff4558ec97c3a3ca"} Nov 28 16:34:09 crc kubenswrapper[4954]: I1128 16:34:09.866662 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6354503c-292f-4372-866e-6a627c56c216" path="/var/lib/kubelet/pods/6354503c-292f-4372-866e-6a627c56c216/volumes" Nov 28 16:34:10 crc kubenswrapper[4954]: I1128 16:34:10.259999 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:34:10 crc kubenswrapper[4954]: I1128 16:34:10.260906 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="64c2cd67-7241-451a-8002-2cf34bfccd66" containerName="glance-log" containerID="cri-o://0591103a92fe55b5aeaf7003da7124790b8840238c9ef95b8d6759987d2cbc7d" gracePeriod=30 Nov 28 16:34:10 crc kubenswrapper[4954]: I1128 16:34:10.261109 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="64c2cd67-7241-451a-8002-2cf34bfccd66" containerName="glance-httpd" containerID="cri-o://953e5aa2abeba459aed01ff7c56aad0fff93023dad7689d774e0c66ccf4b3866" gracePeriod=30 Nov 28 16:34:10 crc kubenswrapper[4954]: I1128 16:34:10.552472 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"018470bf-5b43-46ea-a83c-6742103b947f","Type":"ContainerStarted","Data":"e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe"} Nov 28 16:34:10 crc kubenswrapper[4954]: I1128 16:34:10.558221 4954 generic.go:334] "Generic (PLEG): container finished" podID="64c2cd67-7241-451a-8002-2cf34bfccd66" containerID="0591103a92fe55b5aeaf7003da7124790b8840238c9ef95b8d6759987d2cbc7d" exitCode=143 Nov 28 16:34:10 crc kubenswrapper[4954]: I1128 16:34:10.558265 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"64c2cd67-7241-451a-8002-2cf34bfccd66","Type":"ContainerDied","Data":"0591103a92fe55b5aeaf7003da7124790b8840238c9ef95b8d6759987d2cbc7d"} Nov 28 16:34:11 crc kubenswrapper[4954]: I1128 16:34:11.062860 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:34:11 crc kubenswrapper[4954]: I1128 16:34:11.063444 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="c3fb8465-a03c-4461-b334-42b1b20134c9" containerName="glance-log" containerID="cri-o://ae66ea5bf4f0a053883318ea718224085c3f36d52e10c3bd4f3a097c2bdd7db0" gracePeriod=30 Nov 28 16:34:11 crc kubenswrapper[4954]: I1128 16:34:11.063586 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="c3fb8465-a03c-4461-b334-42b1b20134c9" containerName="glance-httpd" containerID="cri-o://d8e42a4506f0655575814f190360ca846d0c0bd0208364aa42a9f2af88ebdc26" gracePeriod=30 Nov 28 16:34:11 crc kubenswrapper[4954]: I1128 16:34:11.576268 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"018470bf-5b43-46ea-a83c-6742103b947f","Type":"ContainerStarted","Data":"6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832"} Nov 28 16:34:11 crc kubenswrapper[4954]: I1128 16:34:11.583017 4954 generic.go:334] "Generic (PLEG): container finished" podID="c3fb8465-a03c-4461-b334-42b1b20134c9" containerID="ae66ea5bf4f0a053883318ea718224085c3f36d52e10c3bd4f3a097c2bdd7db0" exitCode=143 Nov 28 16:34:11 crc kubenswrapper[4954]: I1128 16:34:11.583073 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3fb8465-a03c-4461-b334-42b1b20134c9","Type":"ContainerDied","Data":"ae66ea5bf4f0a053883318ea718224085c3f36d52e10c3bd4f3a097c2bdd7db0"} Nov 28 16:34:11 crc kubenswrapper[4954]: I1128 16:34:11.896321 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:12 crc kubenswrapper[4954]: I1128 16:34:12.596208 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"018470bf-5b43-46ea-a83c-6742103b947f","Type":"ContainerStarted","Data":"4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787"} Nov 28 16:34:13 crc kubenswrapper[4954]: I1128 16:34:13.608878 4954 generic.go:334] "Generic (PLEG): container finished" podID="64c2cd67-7241-451a-8002-2cf34bfccd66" containerID="953e5aa2abeba459aed01ff7c56aad0fff93023dad7689d774e0c66ccf4b3866" exitCode=0 Nov 28 16:34:13 crc kubenswrapper[4954]: I1128 16:34:13.608950 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"64c2cd67-7241-451a-8002-2cf34bfccd66","Type":"ContainerDied","Data":"953e5aa2abeba459aed01ff7c56aad0fff93023dad7689d774e0c66ccf4b3866"} Nov 28 16:34:14 crc kubenswrapper[4954]: I1128 
16:34:14.622453 4954 generic.go:334] "Generic (PLEG): container finished" podID="c3fb8465-a03c-4461-b334-42b1b20134c9" containerID="d8e42a4506f0655575814f190360ca846d0c0bd0208364aa42a9f2af88ebdc26" exitCode=0 Nov 28 16:34:14 crc kubenswrapper[4954]: I1128 16:34:14.622543 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3fb8465-a03c-4461-b334-42b1b20134c9","Type":"ContainerDied","Data":"d8e42a4506f0655575814f190360ca846d0c0bd0208364aa42a9f2af88ebdc26"} Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.573105 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.595686 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.672913 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"64c2cd67-7241-451a-8002-2cf34bfccd66","Type":"ContainerDied","Data":"f8d827ca9184bf882b0276fa7d72b76fb897653bc597b276fbbe7e60f3e48fa6"} Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.672992 4954 scope.go:117] "RemoveContainer" containerID="953e5aa2abeba459aed01ff7c56aad0fff93023dad7689d774e0c66ccf4b3866" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.673115 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.680069 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3fb8465-a03c-4461-b334-42b1b20134c9","Type":"ContainerDied","Data":"46d540bab171bf5d9bc29b6f3bf2acd912c6fdb5ed6f7721aba6603fe9f2b43d"} Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.680204 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.698618 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"64c2cd67-7241-451a-8002-2cf34bfccd66\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.698697 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-combined-ca-bundle\") pod \"64c2cd67-7241-451a-8002-2cf34bfccd66\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.698734 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64c2cd67-7241-451a-8002-2cf34bfccd66-logs\") pod \"64c2cd67-7241-451a-8002-2cf34bfccd66\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.698761 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/64c2cd67-7241-451a-8002-2cf34bfccd66-httpd-run\") pod \"64c2cd67-7241-451a-8002-2cf34bfccd66\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.698796 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3fb8465-a03c-4461-b334-42b1b20134c9-httpd-run\") pod \"c3fb8465-a03c-4461-b334-42b1b20134c9\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.698894 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-scripts\") pod \"64c2cd67-7241-451a-8002-2cf34bfccd66\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.698916 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcrwd\" (UniqueName: \"kubernetes.io/projected/64c2cd67-7241-451a-8002-2cf34bfccd66-kube-api-access-xcrwd\") pod \"64c2cd67-7241-451a-8002-2cf34bfccd66\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.698940 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-config-data\") pod \"c3fb8465-a03c-4461-b334-42b1b20134c9\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.698998 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jshh2\" (UniqueName: \"kubernetes.io/projected/c3fb8465-a03c-4461-b334-42b1b20134c9-kube-api-access-jshh2\") pod \"c3fb8465-a03c-4461-b334-42b1b20134c9\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.699061 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-scripts\") pod \"c3fb8465-a03c-4461-b334-42b1b20134c9\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " Nov 28 16:34:18 crc 
kubenswrapper[4954]: I1128 16:34:18.699087 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-config-data\") pod \"64c2cd67-7241-451a-8002-2cf34bfccd66\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.699139 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-internal-tls-certs\") pod \"c3fb8465-a03c-4461-b334-42b1b20134c9\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.699154 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"c3fb8465-a03c-4461-b334-42b1b20134c9\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.699210 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-public-tls-certs\") pod \"64c2cd67-7241-451a-8002-2cf34bfccd66\" (UID: \"64c2cd67-7241-451a-8002-2cf34bfccd66\") " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.699235 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3fb8465-a03c-4461-b334-42b1b20134c9-logs\") pod \"c3fb8465-a03c-4461-b334-42b1b20134c9\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.699283 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-combined-ca-bundle\") pod \"c3fb8465-a03c-4461-b334-42b1b20134c9\" (UID: \"c3fb8465-a03c-4461-b334-42b1b20134c9\") " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.700125 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64c2cd67-7241-451a-8002-2cf34bfccd66-logs" (OuterVolumeSpecName: "logs") pod "64c2cd67-7241-451a-8002-2cf34bfccd66" (UID: "64c2cd67-7241-451a-8002-2cf34bfccd66"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.707865 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64c2cd67-7241-451a-8002-2cf34bfccd66-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "64c2cd67-7241-451a-8002-2cf34bfccd66" (UID: "64c2cd67-7241-451a-8002-2cf34bfccd66"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.707995 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3fb8465-a03c-4461-b334-42b1b20134c9-logs" (OuterVolumeSpecName: "logs") pod "c3fb8465-a03c-4461-b334-42b1b20134c9" (UID: "c3fb8465-a03c-4461-b334-42b1b20134c9"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.708056 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3fb8465-a03c-4461-b334-42b1b20134c9-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c3fb8465-a03c-4461-b334-42b1b20134c9" (UID: "c3fb8465-a03c-4461-b334-42b1b20134c9"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.715303 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "64c2cd67-7241-451a-8002-2cf34bfccd66" (UID: "64c2cd67-7241-451a-8002-2cf34bfccd66"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.732942 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64c2cd67-7241-451a-8002-2cf34bfccd66-kube-api-access-xcrwd" (OuterVolumeSpecName: "kube-api-access-xcrwd") pod "64c2cd67-7241-451a-8002-2cf34bfccd66" (UID: "64c2cd67-7241-451a-8002-2cf34bfccd66"). InnerVolumeSpecName "kube-api-access-xcrwd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.735330 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-scripts" (OuterVolumeSpecName: "scripts") pod "c3fb8465-a03c-4461-b334-42b1b20134c9" (UID: "c3fb8465-a03c-4461-b334-42b1b20134c9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.738347 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-scripts" (OuterVolumeSpecName: "scripts") pod "64c2cd67-7241-451a-8002-2cf34bfccd66" (UID: "64c2cd67-7241-451a-8002-2cf34bfccd66"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.738941 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "c3fb8465-a03c-4461-b334-42b1b20134c9" (UID: "c3fb8465-a03c-4461-b334-42b1b20134c9"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.743278 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3fb8465-a03c-4461-b334-42b1b20134c9-kube-api-access-jshh2" (OuterVolumeSpecName: "kube-api-access-jshh2") pod "c3fb8465-a03c-4461-b334-42b1b20134c9" (UID: "c3fb8465-a03c-4461-b334-42b1b20134c9"). InnerVolumeSpecName "kube-api-access-jshh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.774484 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "64c2cd67-7241-451a-8002-2cf34bfccd66" (UID: "64c2cd67-7241-451a-8002-2cf34bfccd66"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.808349 4954 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/64c2cd67-7241-451a-8002-2cf34bfccd66-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.808367 4954 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3fb8465-a03c-4461-b334-42b1b20134c9-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.808375 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcrwd\" (UniqueName: \"kubernetes.io/projected/64c2cd67-7241-451a-8002-2cf34bfccd66-kube-api-access-xcrwd\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.808385 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.808393 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jshh2\" (UniqueName: \"kubernetes.io/projected/c3fb8465-a03c-4461-b334-42b1b20134c9-kube-api-access-jshh2\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.808402 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.808420 4954 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.808428 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3fb8465-a03c-4461-b334-42b1b20134c9-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.808440 4954 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.808449 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.808457 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64c2cd67-7241-451a-8002-2cf34bfccd66-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.853751 4954 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.910330 4954 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:18 crc kubenswrapper[4954]: I1128 16:34:18.926790 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c3fb8465-a03c-4461-b334-42b1b20134c9" (UID: "c3fb8465-a03c-4461-b334-42b1b20134c9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.012258 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.044084 4954 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.095696 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c3fb8465-a03c-4461-b334-42b1b20134c9" (UID: "c3fb8465-a03c-4461-b334-42b1b20134c9"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.114567 4954 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.114614 4954 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.114978 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "64c2cd67-7241-451a-8002-2cf34bfccd66" (UID: "64c2cd67-7241-451a-8002-2cf34bfccd66"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.125833 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-config-data" (OuterVolumeSpecName: "config-data") pod "64c2cd67-7241-451a-8002-2cf34bfccd66" (UID: "64c2cd67-7241-451a-8002-2cf34bfccd66"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.141752 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-config-data" (OuterVolumeSpecName: "config-data") pod "c3fb8465-a03c-4461-b334-42b1b20134c9" (UID: "c3fb8465-a03c-4461-b334-42b1b20134c9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.191553 4954 scope.go:117] "RemoveContainer" containerID="0591103a92fe55b5aeaf7003da7124790b8840238c9ef95b8d6759987d2cbc7d" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.214333 4954 scope.go:117] "RemoveContainer" containerID="d8e42a4506f0655575814f190360ca846d0c0bd0208364aa42a9f2af88ebdc26" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.215751 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3fb8465-a03c-4461-b334-42b1b20134c9-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.215784 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.215811 4954 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/64c2cd67-7241-451a-8002-2cf34bfccd66-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.243613 4954 scope.go:117] "RemoveContainer" containerID="ae66ea5bf4f0a053883318ea718224085c3f36d52e10c3bd4f3a097c2bdd7db0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.335400 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.344296 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.361582 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:34:19 crc kubenswrapper[4954]: E1128 16:34:19.361979 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3fb8465-a03c-4461-b334-42b1b20134c9" containerName="glance-log" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.362004 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3fb8465-a03c-4461-b334-42b1b20134c9" containerName="glance-log" Nov 28 16:34:19 crc kubenswrapper[4954]: E1128 16:34:19.362045 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64c2cd67-7241-451a-8002-2cf34bfccd66" containerName="glance-httpd" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.362054 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="64c2cd67-7241-451a-8002-2cf34bfccd66" containerName="glance-httpd" Nov 28 16:34:19 crc kubenswrapper[4954]: E1128 16:34:19.362082 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64c2cd67-7241-451a-8002-2cf34bfccd66" containerName="glance-log" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.362089 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="64c2cd67-7241-451a-8002-2cf34bfccd66" containerName="glance-log" Nov 28 16:34:19 crc kubenswrapper[4954]: E1128 16:34:19.362099 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3fb8465-a03c-4461-b334-42b1b20134c9" containerName="glance-httpd" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.362106 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3fb8465-a03c-4461-b334-42b1b20134c9" containerName="glance-httpd" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.362317 4954 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="c3fb8465-a03c-4461-b334-42b1b20134c9" containerName="glance-log" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.362342 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="64c2cd67-7241-451a-8002-2cf34bfccd66" containerName="glance-httpd" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.362359 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="64c2cd67-7241-451a-8002-2cf34bfccd66" containerName="glance-log" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.362368 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3fb8465-a03c-4461-b334-42b1b20134c9" containerName="glance-httpd" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.363420 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.366487 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.372836 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.373272 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.385320 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-z8dvv" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.407586 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.414761 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.422219 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.444556 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.445923 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.452970 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.453350 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.471756 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.528143 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.528222 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.528250 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.528284 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-scripts\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.528340 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.528373 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-config-data\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.528420 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jn5s6\" (UniqueName: \"kubernetes.io/projected/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-kube-api-access-jn5s6\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.528472 4954 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-logs\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.629954 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630011 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630039 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630076 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-scripts\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630102 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630134 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630157 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-config-data\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630188 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jn5s6\" (UniqueName: \"kubernetes.io/projected/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-kube-api-access-jn5s6\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630205 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630232 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c362e30-8109-411f-9f89-21c7c28da6c2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630254 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmwss\" (UniqueName: \"kubernetes.io/projected/1c362e30-8109-411f-9f89-21c7c28da6c2-kube-api-access-cmwss\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630273 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630302 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-logs\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630327 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630350 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630379 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c362e30-8109-411f-9f89-21c7c28da6c2-logs\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630513 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630779 4954 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume 
\"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.630844 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-logs\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.644489 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-scripts\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.644515 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.644681 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-config-data\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.654233 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.661354 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jn5s6\" (UniqueName: \"kubernetes.io/projected/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-kube-api-access-jn5s6\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.672967 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-external-api-0\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.691313 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-twcpm" event={"ID":"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780","Type":"ContainerStarted","Data":"b10557bf3df3409a81c72de26c7fe710e7c9b5f93df08097a1eabd4e26aa933a"} Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.694280 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"018470bf-5b43-46ea-a83c-6742103b947f","Type":"ContainerStarted","Data":"8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2"} Nov 28 16:34:19 crc 
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.694396 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="ceilometer-central-agent" containerID="cri-o://e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe" gracePeriod=30
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.694462 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="proxy-httpd" containerID="cri-o://8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2" gracePeriod=30
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.694477 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="ceilometer-notification-agent" containerID="cri-o://6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832" gracePeriod=30
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.694438 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="sg-core" containerID="cri-o://4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787" gracePeriod=30
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.694463 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.698117 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.732577 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.732646 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c362e30-8109-411f-9f89-21c7c28da6c2-logs\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.732677 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.732753 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.732807 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.732836 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c362e30-8109-411f-9f89-21c7c28da6c2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.732864 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmwss\" (UniqueName: \"kubernetes.io/projected/1c362e30-8109-411f-9f89-21c7c28da6c2-kube-api-access-cmwss\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.732880 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.734039 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c362e30-8109-411f-9f89-21c7c28da6c2-logs\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.734259 4954 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.734596 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c362e30-8109-411f-9f89-21c7c28da6c2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.740061 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.745940 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.748837 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.755173 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.761351 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-twcpm" podStartSLOduration=2.512350722 podStartE2EDuration="11.76133261s" podCreationTimestamp="2025-11-28 16:34:08 +0000 UTC" firstStartedPulling="2025-11-28 16:34:09.14909948 +0000 UTC m=+1402.540768021" lastFinishedPulling="2025-11-28 16:34:18.398081368 +0000 UTC m=+1411.789749909" observedRunningTime="2025-11-28 16:34:19.754769843 +0000 UTC m=+1413.146438394" watchObservedRunningTime="2025-11-28 16:34:19.76133261 +0000 UTC m=+1413.153001151"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.773299 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmwss\" (UniqueName: \"kubernetes.io/projected/1c362e30-8109-411f-9f89-21c7c28da6c2-kube-api-access-cmwss\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.791567 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.884178799 podStartE2EDuration="11.791545515s" podCreationTimestamp="2025-11-28 16:34:08 +0000 UTC" firstStartedPulling="2025-11-28 16:34:09.422345523 +0000 UTC m=+1402.814014064" lastFinishedPulling="2025-11-28 16:34:18.329712239 +0000 UTC m=+1411.721380780" observedRunningTime="2025-11-28 16:34:19.789055085 +0000 UTC m=+1413.180723636" watchObservedRunningTime="2025-11-28 16:34:19.791545515 +0000 UTC m=+1413.183214066"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.792356 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.868681 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64c2cd67-7241-451a-8002-2cf34bfccd66" path="/var/lib/kubelet/pods/64c2cd67-7241-451a-8002-2cf34bfccd66/volumes"
Nov 28 16:34:19 crc kubenswrapper[4954]: I1128 16:34:19.869473 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3fb8465-a03c-4461-b334-42b1b20134c9" path="/var/lib/kubelet/pods/c3fb8465-a03c-4461-b334-42b1b20134c9/volumes"
Nov 28 16:34:20 crc kubenswrapper[4954]: I1128 16:34:20.067865 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:20 crc kubenswrapper[4954]: I1128 16:34:20.496414 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 16:34:20 crc kubenswrapper[4954]: I1128 16:34:20.740726 4954 generic.go:334] "Generic (PLEG): container finished" podID="018470bf-5b43-46ea-a83c-6742103b947f" containerID="8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2" exitCode=0
Nov 28 16:34:20 crc kubenswrapper[4954]: I1128 16:34:20.740755 4954 generic.go:334] "Generic (PLEG): container finished" podID="018470bf-5b43-46ea-a83c-6742103b947f" containerID="4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787" exitCode=2
Nov 28 16:34:20 crc kubenswrapper[4954]: I1128 16:34:20.740790 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"018470bf-5b43-46ea-a83c-6742103b947f","Type":"ContainerDied","Data":"8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2"}
Nov 28 16:34:20 crc kubenswrapper[4954]: I1128 16:34:20.740815 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"018470bf-5b43-46ea-a83c-6742103b947f","Type":"ContainerDied","Data":"4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787"}
Nov 28 16:34:20 crc kubenswrapper[4954]: I1128 16:34:20.742485 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf","Type":"ContainerStarted","Data":"5f951126b4ad54dbec1f2f72816800f72e796ac41c13f2e702b000878cc7abc6"}
Nov 28 16:34:20 crc kubenswrapper[4954]: I1128 16:34:20.805426 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 16:34:21 crc kubenswrapper[4954]: I1128 16:34:21.753038 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1c362e30-8109-411f-9f89-21c7c28da6c2","Type":"ContainerStarted","Data":"c8496aa73e00fffae11f764f6f8e07ac7bb4a380c7dd119a8dda5aedbb83d547"}
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.768209 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf","Type":"ContainerStarted","Data":"64d4cc73b002f732d7801401e3ff287905c1abfeedbf8f510d4caf3a46f41b10"} Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.771382 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1c362e30-8109-411f-9f89-21c7c28da6c2","Type":"ContainerStarted","Data":"755de0385bf4e11b5c8e1c31c08371669daa12dda25d66b8c68b992fd6205979"} Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.777882 4954 generic.go:334] "Generic (PLEG): container finished" podID="018470bf-5b43-46ea-a83c-6742103b947f" containerID="6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832" exitCode=0 Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.777920 4954 generic.go:334] "Generic (PLEG): container finished" podID="018470bf-5b43-46ea-a83c-6742103b947f" containerID="e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe" exitCode=0 Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.777945 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"018470bf-5b43-46ea-a83c-6742103b947f","Type":"ContainerDied","Data":"6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832"} Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.777979 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"018470bf-5b43-46ea-a83c-6742103b947f","Type":"ContainerDied","Data":"e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe"} Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.777994 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"018470bf-5b43-46ea-a83c-6742103b947f","Type":"ContainerDied","Data":"ecbe49316f61478271e2a2f71bc79aa9547174d677c3b438ff4558ec97c3a3ca"} Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.778014 4954 scope.go:117] "RemoveContainer" containerID="8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.778210 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.786609 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-config-data\") pod \"018470bf-5b43-46ea-a83c-6742103b947f\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.786763 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-combined-ca-bundle\") pod \"018470bf-5b43-46ea-a83c-6742103b947f\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.786867 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-scripts\") pod \"018470bf-5b43-46ea-a83c-6742103b947f\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.786915 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-sg-core-conf-yaml\") pod \"018470bf-5b43-46ea-a83c-6742103b947f\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.786981 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/018470bf-5b43-46ea-a83c-6742103b947f-run-httpd\") pod \"018470bf-5b43-46ea-a83c-6742103b947f\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.787015 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7pcw\" (UniqueName: \"kubernetes.io/projected/018470bf-5b43-46ea-a83c-6742103b947f-kube-api-access-h7pcw\") pod \"018470bf-5b43-46ea-a83c-6742103b947f\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.787049 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/018470bf-5b43-46ea-a83c-6742103b947f-log-httpd\") pod \"018470bf-5b43-46ea-a83c-6742103b947f\" (UID: \"018470bf-5b43-46ea-a83c-6742103b947f\") " Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.788260 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/018470bf-5b43-46ea-a83c-6742103b947f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "018470bf-5b43-46ea-a83c-6742103b947f" (UID: "018470bf-5b43-46ea-a83c-6742103b947f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.790562 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/018470bf-5b43-46ea-a83c-6742103b947f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "018470bf-5b43-46ea-a83c-6742103b947f" (UID: "018470bf-5b43-46ea-a83c-6742103b947f"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.798339 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-scripts" (OuterVolumeSpecName: "scripts") pod "018470bf-5b43-46ea-a83c-6742103b947f" (UID: "018470bf-5b43-46ea-a83c-6742103b947f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.803942 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/018470bf-5b43-46ea-a83c-6742103b947f-kube-api-access-h7pcw" (OuterVolumeSpecName: "kube-api-access-h7pcw") pod "018470bf-5b43-46ea-a83c-6742103b947f" (UID: "018470bf-5b43-46ea-a83c-6742103b947f"). InnerVolumeSpecName "kube-api-access-h7pcw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.810199 4954 scope.go:117] "RemoveContainer" containerID="4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.827896 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "018470bf-5b43-46ea-a83c-6742103b947f" (UID: "018470bf-5b43-46ea-a83c-6742103b947f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.835964 4954 scope.go:117] "RemoveContainer" containerID="6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.874815 4954 scope.go:117] "RemoveContainer" containerID="e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.889345 4954 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/018470bf-5b43-46ea-a83c-6742103b947f-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.889408 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7pcw\" (UniqueName: \"kubernetes.io/projected/018470bf-5b43-46ea-a83c-6742103b947f-kube-api-access-h7pcw\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.889421 4954 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/018470bf-5b43-46ea-a83c-6742103b947f-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.889431 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.889442 4954 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.902320 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "018470bf-5b43-46ea-a83c-6742103b947f" (UID: 
"018470bf-5b43-46ea-a83c-6742103b947f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.903055 4954 scope.go:117] "RemoveContainer" containerID="8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2" Nov 28 16:34:22 crc kubenswrapper[4954]: E1128 16:34:22.903664 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2\": container with ID starting with 8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2 not found: ID does not exist" containerID="8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.903700 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2"} err="failed to get container status \"8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2\": rpc error: code = NotFound desc = could not find container \"8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2\": container with ID starting with 8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2 not found: ID does not exist" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.903726 4954 scope.go:117] "RemoveContainer" containerID="4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787" Nov 28 16:34:22 crc kubenswrapper[4954]: E1128 16:34:22.904082 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787\": container with ID starting with 4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787 not found: ID does not exist" containerID="4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.904102 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787"} err="failed to get container status \"4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787\": rpc error: code = NotFound desc = could not find container \"4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787\": container with ID starting with 4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787 not found: ID does not exist" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.904120 4954 scope.go:117] "RemoveContainer" containerID="6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832" Nov 28 16:34:22 crc kubenswrapper[4954]: E1128 16:34:22.904376 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832\": container with ID starting with 6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832 not found: ID does not exist" containerID="6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.904397 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832"} err="failed to get container status 
\"6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832\": rpc error: code = NotFound desc = could not find container \"6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832\": container with ID starting with 6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832 not found: ID does not exist" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.904413 4954 scope.go:117] "RemoveContainer" containerID="e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe" Nov 28 16:34:22 crc kubenswrapper[4954]: E1128 16:34:22.904650 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe\": container with ID starting with e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe not found: ID does not exist" containerID="e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.904672 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe"} err="failed to get container status \"e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe\": rpc error: code = NotFound desc = could not find container \"e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe\": container with ID starting with e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe not found: ID does not exist" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.904686 4954 scope.go:117] "RemoveContainer" containerID="8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.904895 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2"} err="failed to get container status \"8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2\": rpc error: code = NotFound desc = could not find container \"8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2\": container with ID starting with 8d56b00d78b0f938b58629c3484f2e3849ad4fc08eb93346e79f722be9e65ad2 not found: ID does not exist" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.904915 4954 scope.go:117] "RemoveContainer" containerID="4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.905173 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787"} err="failed to get container status \"4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787\": rpc error: code = NotFound desc = could not find container \"4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787\": container with ID starting with 4b1621d2968d8ba9395f03d9b62c3fa09396de08632864ed324f8a83d217c787 not found: ID does not exist" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.905191 4954 scope.go:117] "RemoveContainer" containerID="6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.905389 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832"} err="failed to get container status 
\"6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832\": rpc error: code = NotFound desc = could not find container \"6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832\": container with ID starting with 6b2249f07ab5a340b8e5500ab0830bf7ae7f7f7ef6d2429e9a45b09bad487832 not found: ID does not exist" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.905405 4954 scope.go:117] "RemoveContainer" containerID="e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.905745 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe"} err="failed to get container status \"e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe\": rpc error: code = NotFound desc = could not find container \"e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe\": container with ID starting with e7755e97aa55ecd0e91046c73aa9d24b73a719cf2038babccb802b340aad5dfe not found: ID does not exist" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.910032 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-config-data" (OuterVolumeSpecName: "config-data") pod "018470bf-5b43-46ea-a83c-6742103b947f" (UID: "018470bf-5b43-46ea-a83c-6742103b947f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.992196 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:22 crc kubenswrapper[4954]: I1128 16:34:22.992229 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/018470bf-5b43-46ea-a83c-6742103b947f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.118338 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.130662 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.139026 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:23 crc kubenswrapper[4954]: E1128 16:34:23.139462 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="ceilometer-central-agent" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.139490 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="ceilometer-central-agent" Nov 28 16:34:23 crc kubenswrapper[4954]: E1128 16:34:23.139503 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="sg-core" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.139509 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="sg-core" Nov 28 16:34:23 crc kubenswrapper[4954]: E1128 16:34:23.139542 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="ceilometer-notification-agent" Nov 28 16:34:23 crc kubenswrapper[4954]: 
I1128 16:34:23.139549 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="ceilometer-notification-agent" Nov 28 16:34:23 crc kubenswrapper[4954]: E1128 16:34:23.139560 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="proxy-httpd" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.139567 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="proxy-httpd" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.141583 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="ceilometer-central-agent" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.141624 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="proxy-httpd" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.141633 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="sg-core" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.141647 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="018470bf-5b43-46ea-a83c-6742103b947f" containerName="ceilometer-notification-agent" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.144228 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.146230 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.147873 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.154377 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.194810 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.195183 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1e54c34-f8bf-426d-bef1-9e5655376dd0-log-httpd\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.195308 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-scripts\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.195448 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjbrn\" (UniqueName: \"kubernetes.io/projected/c1e54c34-f8bf-426d-bef1-9e5655376dd0-kube-api-access-hjbrn\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc 
kubenswrapper[4954]: I1128 16:34:23.195588 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.195710 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1e54c34-f8bf-426d-bef1-9e5655376dd0-run-httpd\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.195931 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-config-data\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.297206 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.297294 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1e54c34-f8bf-426d-bef1-9e5655376dd0-log-httpd\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.297322 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-scripts\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.297345 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjbrn\" (UniqueName: \"kubernetes.io/projected/c1e54c34-f8bf-426d-bef1-9e5655376dd0-kube-api-access-hjbrn\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.297370 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.297775 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1e54c34-f8bf-426d-bef1-9e5655376dd0-run-httpd\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.297860 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-config-data\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " 
pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.297873 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1e54c34-f8bf-426d-bef1-9e5655376dd0-log-httpd\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.298183 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1e54c34-f8bf-426d-bef1-9e5655376dd0-run-httpd\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.301923 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-scripts\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.302404 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.303177 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.304497 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-config-data\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.313877 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjbrn\" (UniqueName: \"kubernetes.io/projected/c1e54c34-f8bf-426d-bef1-9e5655376dd0-kube-api-access-hjbrn\") pod \"ceilometer-0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") " pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.488405 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.788277 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf","Type":"ContainerStarted","Data":"0a58c23a2643ee01442026f1cd5c2931589d7ded1516de8835e9e562824e613e"} Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.868087 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="018470bf-5b43-46ea-a83c-6742103b947f" path="/var/lib/kubelet/pods/018470bf-5b43-46ea-a83c-6742103b947f/volumes" Nov 28 16:34:23 crc kubenswrapper[4954]: I1128 16:34:23.922967 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:23 crc kubenswrapper[4954]: W1128 16:34:23.928753 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc1e54c34_f8bf_426d_bef1_9e5655376dd0.slice/crio-11801b6a08172e4cd475aa6373a5598c17ce3380dc9a32f5a5073620ce332fdb WatchSource:0}: Error finding container 11801b6a08172e4cd475aa6373a5598c17ce3380dc9a32f5a5073620ce332fdb: Status 404 returned error can't find the container with id 11801b6a08172e4cd475aa6373a5598c17ce3380dc9a32f5a5073620ce332fdb Nov 28 16:34:24 crc kubenswrapper[4954]: I1128 16:34:24.800349 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1e54c34-f8bf-426d-bef1-9e5655376dd0","Type":"ContainerStarted","Data":"11801b6a08172e4cd475aa6373a5598c17ce3380dc9a32f5a5073620ce332fdb"} Nov 28 16:34:25 crc kubenswrapper[4954]: I1128 16:34:25.720451 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:25 crc kubenswrapper[4954]: I1128 16:34:25.816025 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1c362e30-8109-411f-9f89-21c7c28da6c2","Type":"ContainerStarted","Data":"bf5f47ec2adaca701e2261bfe9e2ba603aeebd57c0748bfc08cf9e1207069dde"} Nov 28 16:34:25 crc kubenswrapper[4954]: I1128 16:34:25.818155 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1e54c34-f8bf-426d-bef1-9e5655376dd0","Type":"ContainerStarted","Data":"6d4996cf55039020dd69d9ea887870c5e151c665e38c8ccbd4f9ddf4323b7166"} Nov 28 16:34:25 crc kubenswrapper[4954]: I1128 16:34:25.839828 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.839805106 podStartE2EDuration="6.839805106s" podCreationTimestamp="2025-11-28 16:34:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:25.837666459 +0000 UTC m=+1419.229335000" watchObservedRunningTime="2025-11-28 16:34:25.839805106 +0000 UTC m=+1419.231473657" Nov 28 16:34:25 crc kubenswrapper[4954]: I1128 16:34:25.864631 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.86461029 podStartE2EDuration="6.86461029s" podCreationTimestamp="2025-11-28 16:34:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:25.86332765 +0000 UTC m=+1419.254996201" watchObservedRunningTime="2025-11-28 16:34:25.86461029 +0000 UTC m=+1419.256278851" Nov 28 16:34:26 crc kubenswrapper[4954]: 
I1128 16:34:26.831981 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1e54c34-f8bf-426d-bef1-9e5655376dd0","Type":"ContainerStarted","Data":"9d739a0630226d88e4eae523241053a7b955f9f20aa73922be099de68ddc4014"} Nov 28 16:34:27 crc kubenswrapper[4954]: I1128 16:34:27.843038 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1e54c34-f8bf-426d-bef1-9e5655376dd0","Type":"ContainerStarted","Data":"d5e0a8330ba109edb773e48759068b2b18faf3c9721939eef14662108d7d759e"} Nov 28 16:34:29 crc kubenswrapper[4954]: I1128 16:34:29.699164 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 16:34:29 crc kubenswrapper[4954]: I1128 16:34:29.699557 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 16:34:29 crc kubenswrapper[4954]: I1128 16:34:29.745758 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 16:34:29 crc kubenswrapper[4954]: I1128 16:34:29.756263 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 16:34:29 crc kubenswrapper[4954]: I1128 16:34:29.861699 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="ceilometer-central-agent" containerID="cri-o://6d4996cf55039020dd69d9ea887870c5e151c665e38c8ccbd4f9ddf4323b7166" gracePeriod=30 Nov 28 16:34:29 crc kubenswrapper[4954]: I1128 16:34:29.861744 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="proxy-httpd" containerID="cri-o://bc86d286ad6fb7287efb0ddbbe4a3f2e96b893088600a6f324ea0b85c1668ad4" gracePeriod=30 Nov 28 16:34:29 crc kubenswrapper[4954]: I1128 16:34:29.861765 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="sg-core" containerID="cri-o://d5e0a8330ba109edb773e48759068b2b18faf3c9721939eef14662108d7d759e" gracePeriod=30 Nov 28 16:34:29 crc kubenswrapper[4954]: I1128 16:34:29.861816 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="ceilometer-notification-agent" containerID="cri-o://9d739a0630226d88e4eae523241053a7b955f9f20aa73922be099de68ddc4014" gracePeriod=30 Nov 28 16:34:29 crc kubenswrapper[4954]: I1128 16:34:29.876670 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 16:34:29 crc kubenswrapper[4954]: I1128 16:34:29.876707 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 16:34:29 crc kubenswrapper[4954]: I1128 16:34:29.876719 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1e54c34-f8bf-426d-bef1-9e5655376dd0","Type":"ContainerStarted","Data":"bc86d286ad6fb7287efb0ddbbe4a3f2e96b893088600a6f324ea0b85c1668ad4"} Nov 28 16:34:29 crc kubenswrapper[4954]: I1128 16:34:29.886106 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.8715220540000002 
Nov 28 16:34:29 crc kubenswrapper[4954]: I1128 16:34:29.886106 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.8715220540000002 podStartE2EDuration="6.886080617s" podCreationTimestamp="2025-11-28 16:34:23 +0000 UTC" firstStartedPulling="2025-11-28 16:34:23.932593029 +0000 UTC m=+1417.324261570" lastFinishedPulling="2025-11-28 16:34:28.947151592 +0000 UTC m=+1422.338820133" observedRunningTime="2025-11-28 16:34:29.880898304 +0000 UTC m=+1423.272566855" watchObservedRunningTime="2025-11-28 16:34:29.886080617 +0000 UTC m=+1423.277749158"
Nov 28 16:34:30 crc kubenswrapper[4954]: I1128 16:34:30.069605 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:30 crc kubenswrapper[4954]: I1128 16:34:30.069698 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:30 crc kubenswrapper[4954]: I1128 16:34:30.110210 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:30 crc kubenswrapper[4954]: I1128 16:34:30.125643 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:30 crc kubenswrapper[4954]: I1128 16:34:30.873659 4954 generic.go:334] "Generic (PLEG): container finished" podID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerID="bc86d286ad6fb7287efb0ddbbe4a3f2e96b893088600a6f324ea0b85c1668ad4" exitCode=0
Nov 28 16:34:30 crc kubenswrapper[4954]: I1128 16:34:30.874020 4954 generic.go:334] "Generic (PLEG): container finished" podID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerID="d5e0a8330ba109edb773e48759068b2b18faf3c9721939eef14662108d7d759e" exitCode=2
Nov 28 16:34:30 crc kubenswrapper[4954]: I1128 16:34:30.874035 4954 generic.go:334] "Generic (PLEG): container finished" podID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerID="9d739a0630226d88e4eae523241053a7b955f9f20aa73922be099de68ddc4014" exitCode=0
Nov 28 16:34:30 crc kubenswrapper[4954]: I1128 16:34:30.873715 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1e54c34-f8bf-426d-bef1-9e5655376dd0","Type":"ContainerDied","Data":"bc86d286ad6fb7287efb0ddbbe4a3f2e96b893088600a6f324ea0b85c1668ad4"}
Nov 28 16:34:30 crc kubenswrapper[4954]: I1128 16:34:30.875386 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:30 crc kubenswrapper[4954]: I1128 16:34:30.875410 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1e54c34-f8bf-426d-bef1-9e5655376dd0","Type":"ContainerDied","Data":"d5e0a8330ba109edb773e48759068b2b18faf3c9721939eef14662108d7d759e"}
Nov 28 16:34:30 crc kubenswrapper[4954]: I1128 16:34:30.875428 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1e54c34-f8bf-426d-bef1-9e5655376dd0","Type":"ContainerDied","Data":"9d739a0630226d88e4eae523241053a7b955f9f20aa73922be099de68ddc4014"}
Nov 28 16:34:30 crc kubenswrapper[4954]: I1128 16:34:30.876038 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 28 16:34:31 crc kubenswrapper[4954]: I1128 16:34:31.814651 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 28 16:34:31 crc kubenswrapper[4954]: I1128 16:34:31.820328 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.739281 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.788029 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-sg-core-conf-yaml\") pod \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") "
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.833335 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c1e54c34-f8bf-426d-bef1-9e5655376dd0" (UID: "c1e54c34-f8bf-426d-bef1-9e5655376dd0"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.890892 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1e54c34-f8bf-426d-bef1-9e5655376dd0-log-httpd\") pod \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") "
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.890987 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-combined-ca-bundle\") pod \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") "
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.891067 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-scripts\") pod \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") "
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.891104 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjbrn\" (UniqueName: \"kubernetes.io/projected/c1e54c34-f8bf-426d-bef1-9e5655376dd0-kube-api-access-hjbrn\") pod \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") "
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.891134 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1e54c34-f8bf-426d-bef1-9e5655376dd0-run-httpd\") pod \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") "
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.891169 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-config-data\") pod \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\" (UID: \"c1e54c34-f8bf-426d-bef1-9e5655376dd0\") "
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.891582 4954 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.892345 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1e54c34-f8bf-426d-bef1-9e5655376dd0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c1e54c34-f8bf-426d-bef1-9e5655376dd0" (UID: "c1e54c34-f8bf-426d-bef1-9e5655376dd0"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.893612 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1e54c34-f8bf-426d-bef1-9e5655376dd0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c1e54c34-f8bf-426d-bef1-9e5655376dd0" (UID: "c1e54c34-f8bf-426d-bef1-9e5655376dd0"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.896354 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-scripts" (OuterVolumeSpecName: "scripts") pod "c1e54c34-f8bf-426d-bef1-9e5655376dd0" (UID: "c1e54c34-f8bf-426d-bef1-9e5655376dd0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.897845 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1e54c34-f8bf-426d-bef1-9e5655376dd0-kube-api-access-hjbrn" (OuterVolumeSpecName: "kube-api-access-hjbrn") pod "c1e54c34-f8bf-426d-bef1-9e5655376dd0" (UID: "c1e54c34-f8bf-426d-bef1-9e5655376dd0"). InnerVolumeSpecName "kube-api-access-hjbrn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.906278 4954 generic.go:334] "Generic (PLEG): container finished" podID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerID="6d4996cf55039020dd69d9ea887870c5e151c665e38c8ccbd4f9ddf4323b7166" exitCode=0
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.906611 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1e54c34-f8bf-426d-bef1-9e5655376dd0","Type":"ContainerDied","Data":"6d4996cf55039020dd69d9ea887870c5e151c665e38c8ccbd4f9ddf4323b7166"}
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.906677 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1e54c34-f8bf-426d-bef1-9e5655376dd0","Type":"ContainerDied","Data":"11801b6a08172e4cd475aa6373a5598c17ce3380dc9a32f5a5073620ce332fdb"}
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.906704 4954 scope.go:117] "RemoveContainer" containerID="bc86d286ad6fb7287efb0ddbbe4a3f2e96b893088600a6f324ea0b85c1668ad4"
Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.907603 4954 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.907868 4954 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.907922 4954 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.947911 4954 scope.go:117] "RemoveContainer" containerID="d5e0a8330ba109edb773e48759068b2b18faf3c9721939eef14662108d7d759e" Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.970967 4954 scope.go:117] "RemoveContainer" containerID="9d739a0630226d88e4eae523241053a7b955f9f20aa73922be099de68ddc4014" Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.988859 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c1e54c34-f8bf-426d-bef1-9e5655376dd0" (UID: "c1e54c34-f8bf-426d-bef1-9e5655376dd0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.994598 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.994651 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.994661 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjbrn\" (UniqueName: \"kubernetes.io/projected/c1e54c34-f8bf-426d-bef1-9e5655376dd0-kube-api-access-hjbrn\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.994672 4954 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1e54c34-f8bf-426d-bef1-9e5655376dd0-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.994682 4954 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1e54c34-f8bf-426d-bef1-9e5655376dd0-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:32 crc kubenswrapper[4954]: I1128 16:34:32.998268 4954 scope.go:117] "RemoveContainer" containerID="6d4996cf55039020dd69d9ea887870c5e151c665e38c8ccbd4f9ddf4323b7166" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.020059 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-config-data" (OuterVolumeSpecName: "config-data") pod "c1e54c34-f8bf-426d-bef1-9e5655376dd0" (UID: "c1e54c34-f8bf-426d-bef1-9e5655376dd0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.021809 4954 scope.go:117] "RemoveContainer" containerID="bc86d286ad6fb7287efb0ddbbe4a3f2e96b893088600a6f324ea0b85c1668ad4" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.022087 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 16:34:33 crc kubenswrapper[4954]: E1128 16:34:33.022248 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc86d286ad6fb7287efb0ddbbe4a3f2e96b893088600a6f324ea0b85c1668ad4\": container with ID starting with bc86d286ad6fb7287efb0ddbbe4a3f2e96b893088600a6f324ea0b85c1668ad4 not found: ID does not exist" containerID="bc86d286ad6fb7287efb0ddbbe4a3f2e96b893088600a6f324ea0b85c1668ad4" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.022453 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc86d286ad6fb7287efb0ddbbe4a3f2e96b893088600a6f324ea0b85c1668ad4"} err="failed to get container status \"bc86d286ad6fb7287efb0ddbbe4a3f2e96b893088600a6f324ea0b85c1668ad4\": rpc error: code = NotFound desc = could not find container \"bc86d286ad6fb7287efb0ddbbe4a3f2e96b893088600a6f324ea0b85c1668ad4\": container with ID starting with bc86d286ad6fb7287efb0ddbbe4a3f2e96b893088600a6f324ea0b85c1668ad4 not found: ID does not exist" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.022956 4954 scope.go:117] "RemoveContainer" containerID="d5e0a8330ba109edb773e48759068b2b18faf3c9721939eef14662108d7d759e" Nov 28 16:34:33 crc kubenswrapper[4954]: E1128 16:34:33.023329 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5e0a8330ba109edb773e48759068b2b18faf3c9721939eef14662108d7d759e\": container with ID starting with d5e0a8330ba109edb773e48759068b2b18faf3c9721939eef14662108d7d759e not found: ID does not exist" containerID="d5e0a8330ba109edb773e48759068b2b18faf3c9721939eef14662108d7d759e" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.023357 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5e0a8330ba109edb773e48759068b2b18faf3c9721939eef14662108d7d759e"} err="failed to get container status \"d5e0a8330ba109edb773e48759068b2b18faf3c9721939eef14662108d7d759e\": rpc error: code = NotFound desc = could not find container \"d5e0a8330ba109edb773e48759068b2b18faf3c9721939eef14662108d7d759e\": container with ID starting with d5e0a8330ba109edb773e48759068b2b18faf3c9721939eef14662108d7d759e not found: ID does not exist" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.023377 4954 scope.go:117] "RemoveContainer" containerID="9d739a0630226d88e4eae523241053a7b955f9f20aa73922be099de68ddc4014" Nov 28 16:34:33 crc kubenswrapper[4954]: E1128 16:34:33.023737 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d739a0630226d88e4eae523241053a7b955f9f20aa73922be099de68ddc4014\": container with ID starting with 9d739a0630226d88e4eae523241053a7b955f9f20aa73922be099de68ddc4014 not found: ID does not exist" containerID="9d739a0630226d88e4eae523241053a7b955f9f20aa73922be099de68ddc4014" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.023757 4954 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"9d739a0630226d88e4eae523241053a7b955f9f20aa73922be099de68ddc4014"} err="failed to get container status \"9d739a0630226d88e4eae523241053a7b955f9f20aa73922be099de68ddc4014\": rpc error: code = NotFound desc = could not find container \"9d739a0630226d88e4eae523241053a7b955f9f20aa73922be099de68ddc4014\": container with ID starting with 9d739a0630226d88e4eae523241053a7b955f9f20aa73922be099de68ddc4014 not found: ID does not exist" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.023769 4954 scope.go:117] "RemoveContainer" containerID="6d4996cf55039020dd69d9ea887870c5e151c665e38c8ccbd4f9ddf4323b7166" Nov 28 16:34:33 crc kubenswrapper[4954]: E1128 16:34:33.024171 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d4996cf55039020dd69d9ea887870c5e151c665e38c8ccbd4f9ddf4323b7166\": container with ID starting with 6d4996cf55039020dd69d9ea887870c5e151c665e38c8ccbd4f9ddf4323b7166 not found: ID does not exist" containerID="6d4996cf55039020dd69d9ea887870c5e151c665e38c8ccbd4f9ddf4323b7166" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.024297 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d4996cf55039020dd69d9ea887870c5e151c665e38c8ccbd4f9ddf4323b7166"} err="failed to get container status \"6d4996cf55039020dd69d9ea887870c5e151c665e38c8ccbd4f9ddf4323b7166\": rpc error: code = NotFound desc = could not find container \"6d4996cf55039020dd69d9ea887870c5e151c665e38c8ccbd4f9ddf4323b7166\": container with ID starting with 6d4996cf55039020dd69d9ea887870c5e151c665e38c8ccbd4f9ddf4323b7166 not found: ID does not exist" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.098689 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1e54c34-f8bf-426d-bef1-9e5655376dd0-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.241917 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.255963 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.287792 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:33 crc kubenswrapper[4954]: E1128 16:34:33.288243 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="sg-core" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.288262 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="sg-core" Nov 28 16:34:33 crc kubenswrapper[4954]: E1128 16:34:33.288287 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="ceilometer-central-agent" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.288295 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="ceilometer-central-agent" Nov 28 16:34:33 crc kubenswrapper[4954]: E1128 16:34:33.288318 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="proxy-httpd" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.288325 4954 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="proxy-httpd" Nov 28 16:34:33 crc kubenswrapper[4954]: E1128 16:34:33.288335 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="ceilometer-notification-agent" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.288342 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="ceilometer-notification-agent" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.288587 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="ceilometer-central-agent" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.288613 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="sg-core" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.288629 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="proxy-httpd" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.288640 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" containerName="ceilometer-notification-agent" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.296825 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.300651 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.314065 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.318949 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.392748 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.403992 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jr62\" (UniqueName: \"kubernetes.io/projected/40e12dfc-d124-4282-84e9-f2ae628c864d-kube-api-access-4jr62\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.404066 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-config-data\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.404263 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/40e12dfc-d124-4282-84e9-f2ae628c864d-log-httpd\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.404322 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/40e12dfc-d124-4282-84e9-f2ae628c864d-run-httpd\") pod 
\"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.404361 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-scripts\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.404381 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.404486 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.506650 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-config-data\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.506735 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/40e12dfc-d124-4282-84e9-f2ae628c864d-log-httpd\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.506765 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/40e12dfc-d124-4282-84e9-f2ae628c864d-run-httpd\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.506794 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-scripts\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.506817 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.506862 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.506971 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jr62\" (UniqueName: 
\"kubernetes.io/projected/40e12dfc-d124-4282-84e9-f2ae628c864d-kube-api-access-4jr62\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.507479 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/40e12dfc-d124-4282-84e9-f2ae628c864d-log-httpd\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.507682 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/40e12dfc-d124-4282-84e9-f2ae628c864d-run-httpd\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.512546 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.514298 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.514663 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-config-data\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.514772 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-scripts\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.524835 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jr62\" (UniqueName: \"kubernetes.io/projected/40e12dfc-d124-4282-84e9-f2ae628c864d-kube-api-access-4jr62\") pod \"ceilometer-0\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.666349 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:33 crc kubenswrapper[4954]: I1128 16:34:33.872516 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1e54c34-f8bf-426d-bef1-9e5655376dd0" path="/var/lib/kubelet/pods/c1e54c34-f8bf-426d-bef1-9e5655376dd0/volumes" Nov 28 16:34:34 crc kubenswrapper[4954]: I1128 16:34:34.167586 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:34 crc kubenswrapper[4954]: W1128 16:34:34.173864 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod40e12dfc_d124_4282_84e9_f2ae628c864d.slice/crio-41aa597ce654b737dd21fcd7af5ca98f9bc1daf7e3937f5c0e0c0cf684061505 WatchSource:0}: Error finding container 41aa597ce654b737dd21fcd7af5ca98f9bc1daf7e3937f5c0e0c0cf684061505: Status 404 returned error can't find the container with id 41aa597ce654b737dd21fcd7af5ca98f9bc1daf7e3937f5c0e0c0cf684061505 Nov 28 16:34:34 crc kubenswrapper[4954]: I1128 16:34:34.176964 4954 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:34:34 crc kubenswrapper[4954]: I1128 16:34:34.936221 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"40e12dfc-d124-4282-84e9-f2ae628c864d","Type":"ContainerStarted","Data":"41aa597ce654b737dd21fcd7af5ca98f9bc1daf7e3937f5c0e0c0cf684061505"} Nov 28 16:34:35 crc kubenswrapper[4954]: I1128 16:34:35.947032 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"40e12dfc-d124-4282-84e9-f2ae628c864d","Type":"ContainerStarted","Data":"f81414d55e522aaa2605f908cb8991c6adcabfb08921acf7b23ebf78f54bc31c"} Nov 28 16:34:36 crc kubenswrapper[4954]: I1128 16:34:36.957995 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"40e12dfc-d124-4282-84e9-f2ae628c864d","Type":"ContainerStarted","Data":"aca6fa47e98ce3c5238733b3d9757bfdd82f6caff1a68e2b24e50b764ab22eaa"} Nov 28 16:34:37 crc kubenswrapper[4954]: I1128 16:34:37.949175 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:37 crc kubenswrapper[4954]: I1128 16:34:37.968831 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"40e12dfc-d124-4282-84e9-f2ae628c864d","Type":"ContainerStarted","Data":"c0b1d01e3e26958ee80db42fe78faf4e7f9b417c1deeedade82b543da3d57396"} Nov 28 16:34:37 crc kubenswrapper[4954]: I1128 16:34:37.970450 4954 generic.go:334] "Generic (PLEG): container finished" podID="2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780" containerID="b10557bf3df3409a81c72de26c7fe710e7c9b5f93df08097a1eabd4e26aa933a" exitCode=0 Nov 28 16:34:37 crc kubenswrapper[4954]: I1128 16:34:37.970491 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-twcpm" event={"ID":"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780","Type":"ContainerDied","Data":"b10557bf3df3409a81c72de26c7fe710e7c9b5f93df08097a1eabd4e26aa933a"} Nov 28 16:34:38 crc kubenswrapper[4954]: I1128 16:34:38.985948 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"40e12dfc-d124-4282-84e9-f2ae628c864d","Type":"ContainerStarted","Data":"36d0f7f260f0485aed4069c98d342772b5aab28800deb94fd5317f3349c78d20"} Nov 28 16:34:38 crc kubenswrapper[4954]: I1128 16:34:38.986154 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="ceilometer-central-agent" containerID="cri-o://f81414d55e522aaa2605f908cb8991c6adcabfb08921acf7b23ebf78f54bc31c" gracePeriod=30 Nov 28 16:34:38 crc kubenswrapper[4954]: I1128 16:34:38.986187 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="proxy-httpd" containerID="cri-o://36d0f7f260f0485aed4069c98d342772b5aab28800deb94fd5317f3349c78d20" gracePeriod=30 Nov 28 16:34:38 crc kubenswrapper[4954]: I1128 16:34:38.986235 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="ceilometer-notification-agent" containerID="cri-o://aca6fa47e98ce3c5238733b3d9757bfdd82f6caff1a68e2b24e50b764ab22eaa" gracePeriod=30 Nov 28 16:34:38 crc kubenswrapper[4954]: I1128 16:34:38.986247 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="sg-core" containerID="cri-o://c0b1d01e3e26958ee80db42fe78faf4e7f9b417c1deeedade82b543da3d57396" gracePeriod=30 Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.008676 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.586451088 podStartE2EDuration="6.008661425s" podCreationTimestamp="2025-11-28 16:34:33 +0000 UTC" firstStartedPulling="2025-11-28 16:34:34.176696367 +0000 UTC m=+1427.568364918" lastFinishedPulling="2025-11-28 16:34:38.598906714 +0000 UTC m=+1431.990575255" observedRunningTime="2025-11-28 16:34:39.006875978 +0000 UTC m=+1432.398544519" watchObservedRunningTime="2025-11-28 16:34:39.008661425 +0000 UTC m=+1432.400329966" Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.280290 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.426631 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-config-data\") pod \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\" (UID: \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.426709 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nj2lv\" (UniqueName: \"kubernetes.io/projected/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-kube-api-access-nj2lv\") pod \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\" (UID: \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.426759 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-combined-ca-bundle\") pod \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\" (UID: \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.426896 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-scripts\") pod \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\" (UID: \"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780\") " Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.432589 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-scripts" (OuterVolumeSpecName: "scripts") pod "2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780" (UID: "2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.436066 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-kube-api-access-nj2lv" (OuterVolumeSpecName: "kube-api-access-nj2lv") pod "2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780" (UID: "2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780"). InnerVolumeSpecName "kube-api-access-nj2lv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.457321 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780" (UID: "2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.457783 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-config-data" (OuterVolumeSpecName: "config-data") pod "2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780" (UID: "2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.529649 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.529689 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.529706 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nj2lv\" (UniqueName: \"kubernetes.io/projected/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-kube-api-access-nj2lv\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.529720 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.995349 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-twcpm" event={"ID":"2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780","Type":"ContainerDied","Data":"1bdeebf37dce6be8d3d7d887015e6705acb3353d16443b69c3b934bcb154c60b"} Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.995652 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1bdeebf37dce6be8d3d7d887015e6705acb3353d16443b69c3b934bcb154c60b" Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.995417 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-twcpm" Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.998784 4954 generic.go:334] "Generic (PLEG): container finished" podID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerID="36d0f7f260f0485aed4069c98d342772b5aab28800deb94fd5317f3349c78d20" exitCode=0 Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.998815 4954 generic.go:334] "Generic (PLEG): container finished" podID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerID="c0b1d01e3e26958ee80db42fe78faf4e7f9b417c1deeedade82b543da3d57396" exitCode=2 Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.998824 4954 generic.go:334] "Generic (PLEG): container finished" podID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerID="aca6fa47e98ce3c5238733b3d9757bfdd82f6caff1a68e2b24e50b764ab22eaa" exitCode=0 Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.998839 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"40e12dfc-d124-4282-84e9-f2ae628c864d","Type":"ContainerDied","Data":"36d0f7f260f0485aed4069c98d342772b5aab28800deb94fd5317f3349c78d20"} Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.998870 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"40e12dfc-d124-4282-84e9-f2ae628c864d","Type":"ContainerDied","Data":"c0b1d01e3e26958ee80db42fe78faf4e7f9b417c1deeedade82b543da3d57396"} Nov 28 16:34:39 crc kubenswrapper[4954]: I1128 16:34:39.998883 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"40e12dfc-d124-4282-84e9-f2ae628c864d","Type":"ContainerDied","Data":"aca6fa47e98ce3c5238733b3d9757bfdd82f6caff1a68e2b24e50b764ab22eaa"} Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 
16:34:40.105551 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:34:40 crc kubenswrapper[4954]: E1128 16:34:40.106121 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780" containerName="nova-cell0-conductor-db-sync" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.106143 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780" containerName="nova-cell0-conductor-db-sync" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.106365 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780" containerName="nova-cell0-conductor-db-sync" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.107043 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.110269 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-4nkxs" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.110421 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.123401 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.243686 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f8d636b-e07a-46b1-91b3-899a395e3ce5-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"0f8d636b-e07a-46b1-91b3-899a395e3ce5\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.243798 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f8d636b-e07a-46b1-91b3-899a395e3ce5-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"0f8d636b-e07a-46b1-91b3-899a395e3ce5\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.243884 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgv9x\" (UniqueName: \"kubernetes.io/projected/0f8d636b-e07a-46b1-91b3-899a395e3ce5-kube-api-access-zgv9x\") pod \"nova-cell0-conductor-0\" (UID: \"0f8d636b-e07a-46b1-91b3-899a395e3ce5\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.345432 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgv9x\" (UniqueName: \"kubernetes.io/projected/0f8d636b-e07a-46b1-91b3-899a395e3ce5-kube-api-access-zgv9x\") pod \"nova-cell0-conductor-0\" (UID: \"0f8d636b-e07a-46b1-91b3-899a395e3ce5\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.345656 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f8d636b-e07a-46b1-91b3-899a395e3ce5-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"0f8d636b-e07a-46b1-91b3-899a395e3ce5\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.345804 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/0f8d636b-e07a-46b1-91b3-899a395e3ce5-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"0f8d636b-e07a-46b1-91b3-899a395e3ce5\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.359511 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f8d636b-e07a-46b1-91b3-899a395e3ce5-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"0f8d636b-e07a-46b1-91b3-899a395e3ce5\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.360509 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f8d636b-e07a-46b1-91b3-899a395e3ce5-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"0f8d636b-e07a-46b1-91b3-899a395e3ce5\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.375662 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgv9x\" (UniqueName: \"kubernetes.io/projected/0f8d636b-e07a-46b1-91b3-899a395e3ce5-kube-api-access-zgv9x\") pod \"nova-cell0-conductor-0\" (UID: \"0f8d636b-e07a-46b1-91b3-899a395e3ce5\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.433718 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:40 crc kubenswrapper[4954]: I1128 16:34:40.856546 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:34:41 crc kubenswrapper[4954]: I1128 16:34:41.007574 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"0f8d636b-e07a-46b1-91b3-899a395e3ce5","Type":"ContainerStarted","Data":"b1bcee61ae09916f73021f0a765fd39e01ffaa7ac8a9ebdc4c5b3e9311a83c72"} Nov 28 16:34:42 crc kubenswrapper[4954]: I1128 16:34:42.018969 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"0f8d636b-e07a-46b1-91b3-899a395e3ce5","Type":"ContainerStarted","Data":"a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b"} Nov 28 16:34:42 crc kubenswrapper[4954]: I1128 16:34:42.019471 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:42 crc kubenswrapper[4954]: I1128 16:34:42.042315 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.04229655 podStartE2EDuration="2.04229655s" podCreationTimestamp="2025-11-28 16:34:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:42.03330647 +0000 UTC m=+1435.424975011" watchObservedRunningTime="2025-11-28 16:34:42.04229655 +0000 UTC m=+1435.433965081" Nov 28 16:34:42 crc kubenswrapper[4954]: I1128 16:34:42.905571 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.004117 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/40e12dfc-d124-4282-84e9-f2ae628c864d-log-httpd\") pod \"40e12dfc-d124-4282-84e9-f2ae628c864d\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.004218 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/40e12dfc-d124-4282-84e9-f2ae628c864d-run-httpd\") pod \"40e12dfc-d124-4282-84e9-f2ae628c864d\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.004250 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-config-data\") pod \"40e12dfc-d124-4282-84e9-f2ae628c864d\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.004312 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jr62\" (UniqueName: \"kubernetes.io/projected/40e12dfc-d124-4282-84e9-f2ae628c864d-kube-api-access-4jr62\") pod \"40e12dfc-d124-4282-84e9-f2ae628c864d\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.004348 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-sg-core-conf-yaml\") pod \"40e12dfc-d124-4282-84e9-f2ae628c864d\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.004378 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-scripts\") pod \"40e12dfc-d124-4282-84e9-f2ae628c864d\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.004406 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-combined-ca-bundle\") pod \"40e12dfc-d124-4282-84e9-f2ae628c864d\" (UID: \"40e12dfc-d124-4282-84e9-f2ae628c864d\") " Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.004662 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40e12dfc-d124-4282-84e9-f2ae628c864d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "40e12dfc-d124-4282-84e9-f2ae628c864d" (UID: "40e12dfc-d124-4282-84e9-f2ae628c864d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.005032 4954 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/40e12dfc-d124-4282-84e9-f2ae628c864d-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.005761 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40e12dfc-d124-4282-84e9-f2ae628c864d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "40e12dfc-d124-4282-84e9-f2ae628c864d" (UID: "40e12dfc-d124-4282-84e9-f2ae628c864d"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.010374 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40e12dfc-d124-4282-84e9-f2ae628c864d-kube-api-access-4jr62" (OuterVolumeSpecName: "kube-api-access-4jr62") pod "40e12dfc-d124-4282-84e9-f2ae628c864d" (UID: "40e12dfc-d124-4282-84e9-f2ae628c864d"). InnerVolumeSpecName "kube-api-access-4jr62". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.010586 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-scripts" (OuterVolumeSpecName: "scripts") pod "40e12dfc-d124-4282-84e9-f2ae628c864d" (UID: "40e12dfc-d124-4282-84e9-f2ae628c864d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.029236 4954 generic.go:334] "Generic (PLEG): container finished" podID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerID="f81414d55e522aaa2605f908cb8991c6adcabfb08921acf7b23ebf78f54bc31c" exitCode=0 Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.029387 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"40e12dfc-d124-4282-84e9-f2ae628c864d","Type":"ContainerDied","Data":"f81414d55e522aaa2605f908cb8991c6adcabfb08921acf7b23ebf78f54bc31c"} Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.029427 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"40e12dfc-d124-4282-84e9-f2ae628c864d","Type":"ContainerDied","Data":"41aa597ce654b737dd21fcd7af5ca98f9bc1daf7e3937f5c0e0c0cf684061505"} Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.029425 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.029444 4954 scope.go:117] "RemoveContainer" containerID="36d0f7f260f0485aed4069c98d342772b5aab28800deb94fd5317f3349c78d20" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.038757 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "40e12dfc-d124-4282-84e9-f2ae628c864d" (UID: "40e12dfc-d124-4282-84e9-f2ae628c864d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.082577 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "40e12dfc-d124-4282-84e9-f2ae628c864d" (UID: "40e12dfc-d124-4282-84e9-f2ae628c864d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.103345 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-config-data" (OuterVolumeSpecName: "config-data") pod "40e12dfc-d124-4282-84e9-f2ae628c864d" (UID: "40e12dfc-d124-4282-84e9-f2ae628c864d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.106693 4954 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/40e12dfc-d124-4282-84e9-f2ae628c864d-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.106719 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.106747 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jr62\" (UniqueName: \"kubernetes.io/projected/40e12dfc-d124-4282-84e9-f2ae628c864d-kube-api-access-4jr62\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.106757 4954 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.106764 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.106773 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40e12dfc-d124-4282-84e9-f2ae628c864d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.143003 4954 scope.go:117] "RemoveContainer" containerID="c0b1d01e3e26958ee80db42fe78faf4e7f9b417c1deeedade82b543da3d57396" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.182493 4954 scope.go:117] "RemoveContainer" containerID="aca6fa47e98ce3c5238733b3d9757bfdd82f6caff1a68e2b24e50b764ab22eaa" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.201898 4954 scope.go:117] "RemoveContainer" containerID="f81414d55e522aaa2605f908cb8991c6adcabfb08921acf7b23ebf78f54bc31c" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.221057 4954 scope.go:117] "RemoveContainer" containerID="36d0f7f260f0485aed4069c98d342772b5aab28800deb94fd5317f3349c78d20" Nov 28 16:34:43 crc kubenswrapper[4954]: E1128 16:34:43.221457 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36d0f7f260f0485aed4069c98d342772b5aab28800deb94fd5317f3349c78d20\": container with ID starting with 36d0f7f260f0485aed4069c98d342772b5aab28800deb94fd5317f3349c78d20 not found: ID does not exist" containerID="36d0f7f260f0485aed4069c98d342772b5aab28800deb94fd5317f3349c78d20" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.221618 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36d0f7f260f0485aed4069c98d342772b5aab28800deb94fd5317f3349c78d20"} err="failed to get container status \"36d0f7f260f0485aed4069c98d342772b5aab28800deb94fd5317f3349c78d20\": rpc error: code = NotFound desc = could not find container \"36d0f7f260f0485aed4069c98d342772b5aab28800deb94fd5317f3349c78d20\": container with ID starting with 36d0f7f260f0485aed4069c98d342772b5aab28800deb94fd5317f3349c78d20 not found: ID does not exist" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.221835 4954 scope.go:117] "RemoveContainer" 
containerID="c0b1d01e3e26958ee80db42fe78faf4e7f9b417c1deeedade82b543da3d57396" Nov 28 16:34:43 crc kubenswrapper[4954]: E1128 16:34:43.222785 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0b1d01e3e26958ee80db42fe78faf4e7f9b417c1deeedade82b543da3d57396\": container with ID starting with c0b1d01e3e26958ee80db42fe78faf4e7f9b417c1deeedade82b543da3d57396 not found: ID does not exist" containerID="c0b1d01e3e26958ee80db42fe78faf4e7f9b417c1deeedade82b543da3d57396" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.222809 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0b1d01e3e26958ee80db42fe78faf4e7f9b417c1deeedade82b543da3d57396"} err="failed to get container status \"c0b1d01e3e26958ee80db42fe78faf4e7f9b417c1deeedade82b543da3d57396\": rpc error: code = NotFound desc = could not find container \"c0b1d01e3e26958ee80db42fe78faf4e7f9b417c1deeedade82b543da3d57396\": container with ID starting with c0b1d01e3e26958ee80db42fe78faf4e7f9b417c1deeedade82b543da3d57396 not found: ID does not exist" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.222879 4954 scope.go:117] "RemoveContainer" containerID="aca6fa47e98ce3c5238733b3d9757bfdd82f6caff1a68e2b24e50b764ab22eaa" Nov 28 16:34:43 crc kubenswrapper[4954]: E1128 16:34:43.223174 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aca6fa47e98ce3c5238733b3d9757bfdd82f6caff1a68e2b24e50b764ab22eaa\": container with ID starting with aca6fa47e98ce3c5238733b3d9757bfdd82f6caff1a68e2b24e50b764ab22eaa not found: ID does not exist" containerID="aca6fa47e98ce3c5238733b3d9757bfdd82f6caff1a68e2b24e50b764ab22eaa" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.223195 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aca6fa47e98ce3c5238733b3d9757bfdd82f6caff1a68e2b24e50b764ab22eaa"} err="failed to get container status \"aca6fa47e98ce3c5238733b3d9757bfdd82f6caff1a68e2b24e50b764ab22eaa\": rpc error: code = NotFound desc = could not find container \"aca6fa47e98ce3c5238733b3d9757bfdd82f6caff1a68e2b24e50b764ab22eaa\": container with ID starting with aca6fa47e98ce3c5238733b3d9757bfdd82f6caff1a68e2b24e50b764ab22eaa not found: ID does not exist" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.223209 4954 scope.go:117] "RemoveContainer" containerID="f81414d55e522aaa2605f908cb8991c6adcabfb08921acf7b23ebf78f54bc31c" Nov 28 16:34:43 crc kubenswrapper[4954]: E1128 16:34:43.223605 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f81414d55e522aaa2605f908cb8991c6adcabfb08921acf7b23ebf78f54bc31c\": container with ID starting with f81414d55e522aaa2605f908cb8991c6adcabfb08921acf7b23ebf78f54bc31c not found: ID does not exist" containerID="f81414d55e522aaa2605f908cb8991c6adcabfb08921acf7b23ebf78f54bc31c" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.223626 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f81414d55e522aaa2605f908cb8991c6adcabfb08921acf7b23ebf78f54bc31c"} err="failed to get container status \"f81414d55e522aaa2605f908cb8991c6adcabfb08921acf7b23ebf78f54bc31c\": rpc error: code = NotFound desc = could not find container \"f81414d55e522aaa2605f908cb8991c6adcabfb08921acf7b23ebf78f54bc31c\": container with ID starting with 
f81414d55e522aaa2605f908cb8991c6adcabfb08921acf7b23ebf78f54bc31c not found: ID does not exist" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.360547 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.368634 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.391146 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:43 crc kubenswrapper[4954]: E1128 16:34:43.391670 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="proxy-httpd" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.391697 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="proxy-httpd" Nov 28 16:34:43 crc kubenswrapper[4954]: E1128 16:34:43.391737 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="ceilometer-central-agent" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.391747 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="ceilometer-central-agent" Nov 28 16:34:43 crc kubenswrapper[4954]: E1128 16:34:43.391770 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="ceilometer-notification-agent" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.391779 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="ceilometer-notification-agent" Nov 28 16:34:43 crc kubenswrapper[4954]: E1128 16:34:43.391798 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="sg-core" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.391806 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="sg-core" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.392024 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="ceilometer-central-agent" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.392080 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="sg-core" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.392096 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="proxy-httpd" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.392111 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" containerName="ceilometer-notification-agent" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.394505 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.397090 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.397300 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.409472 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.518022 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.518080 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-scripts\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.518147 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.518446 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8qfm\" (UniqueName: \"kubernetes.io/projected/5a7176ea-3a4e-4259-9419-f4561f6d443f-kube-api-access-k8qfm\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.518496 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a7176ea-3a4e-4259-9419-f4561f6d443f-log-httpd\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.518773 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-config-data\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.518837 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a7176ea-3a4e-4259-9419-f4561f6d443f-run-httpd\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.620145 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 
16:34:43.620449 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-scripts\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.620481 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.620554 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8qfm\" (UniqueName: \"kubernetes.io/projected/5a7176ea-3a4e-4259-9419-f4561f6d443f-kube-api-access-k8qfm\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.620578 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a7176ea-3a4e-4259-9419-f4561f6d443f-log-httpd\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.620594 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-config-data\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.620638 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a7176ea-3a4e-4259-9419-f4561f6d443f-run-httpd\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.621013 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a7176ea-3a4e-4259-9419-f4561f6d443f-log-httpd\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.621118 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a7176ea-3a4e-4259-9419-f4561f6d443f-run-httpd\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.626406 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-scripts\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.627410 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-config-data\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.631355 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.632004 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.650335 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8qfm\" (UniqueName: \"kubernetes.io/projected/5a7176ea-3a4e-4259-9419-f4561f6d443f-kube-api-access-k8qfm\") pod \"ceilometer-0\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.728190 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:34:43 crc kubenswrapper[4954]: I1128 16:34:43.872426 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40e12dfc-d124-4282-84e9-f2ae628c864d" path="/var/lib/kubelet/pods/40e12dfc-d124-4282-84e9-f2ae628c864d/volumes" Nov 28 16:34:44 crc kubenswrapper[4954]: W1128 16:34:44.188877 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a7176ea_3a4e_4259_9419_f4561f6d443f.slice/crio-eead86bc128f26ddd9d6ef2a72b6487ca9fdaa5f6733d49d9dc2010935076ec8 WatchSource:0}: Error finding container eead86bc128f26ddd9d6ef2a72b6487ca9fdaa5f6733d49d9dc2010935076ec8: Status 404 returned error can't find the container with id eead86bc128f26ddd9d6ef2a72b6487ca9fdaa5f6733d49d9dc2010935076ec8 Nov 28 16:34:44 crc kubenswrapper[4954]: I1128 16:34:44.191557 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:34:45 crc kubenswrapper[4954]: I1128 16:34:45.052952 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a7176ea-3a4e-4259-9419-f4561f6d443f","Type":"ContainerStarted","Data":"eead86bc128f26ddd9d6ef2a72b6487ca9fdaa5f6733d49d9dc2010935076ec8"} Nov 28 16:34:46 crc kubenswrapper[4954]: I1128 16:34:46.070979 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a7176ea-3a4e-4259-9419-f4561f6d443f","Type":"ContainerStarted","Data":"38a3386eaf76a0ef28e0bc25f15e90345e3a051125b4898ffafed6dcbf86e981"} Nov 28 16:34:47 crc kubenswrapper[4954]: I1128 16:34:47.091136 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a7176ea-3a4e-4259-9419-f4561f6d443f","Type":"ContainerStarted","Data":"c79523201917283c28abf14b4967c0d52c990e283b8ce254873811fca6ee5e33"} Nov 28 16:34:48 crc kubenswrapper[4954]: I1128 16:34:48.102422 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a7176ea-3a4e-4259-9419-f4561f6d443f","Type":"ContainerStarted","Data":"8e6ada87f371353aaa2d403fef939effc0cb40a0b79e100193235b21cefae6e4"} Nov 28 16:34:50 crc kubenswrapper[4954]: I1128 16:34:50.152349 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a7176ea-3a4e-4259-9419-f4561f6d443f","Type":"ContainerStarted","Data":"28ca19b93b21234708bae450568d1c9f9e31b5ac55effc0128c40c7f2cb976be"} Nov 28 16:34:50 crc 
kubenswrapper[4954]: I1128 16:34:50.153430 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 16:34:50 crc kubenswrapper[4954]: I1128 16:34:50.191336 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.398248037 podStartE2EDuration="7.19130952s" podCreationTimestamp="2025-11-28 16:34:43 +0000 UTC" firstStartedPulling="2025-11-28 16:34:44.19220628 +0000 UTC m=+1437.583874821" lastFinishedPulling="2025-11-28 16:34:48.985267763 +0000 UTC m=+1442.376936304" observedRunningTime="2025-11-28 16:34:50.179977664 +0000 UTC m=+1443.571646205" watchObservedRunningTime="2025-11-28 16:34:50.19130952 +0000 UTC m=+1443.582978061" Nov 28 16:34:50 crc kubenswrapper[4954]: I1128 16:34:50.464591 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 28 16:34:50 crc kubenswrapper[4954]: I1128 16:34:50.939946 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-crjqq"] Nov 28 16:34:50 crc kubenswrapper[4954]: I1128 16:34:50.941231 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:34:50 crc kubenswrapper[4954]: I1128 16:34:50.947243 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 28 16:34:50 crc kubenswrapper[4954]: I1128 16:34:50.947330 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 28 16:34:50 crc kubenswrapper[4954]: I1128 16:34:50.984520 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-crjqq"] Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.111458 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-config-data\") pod \"nova-cell0-cell-mapping-crjqq\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.111512 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-crjqq\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.111700 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbg7g\" (UniqueName: \"kubernetes.io/projected/ac64805c-4e48-4c5c-99dd-f724049c6eae-kube-api-access-nbg7g\") pod \"nova-cell0-cell-mapping-crjqq\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.111774 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-scripts\") pod \"nova-cell0-cell-mapping-crjqq\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.192886 4954 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/nova-api-0"] Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.194907 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.202006 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.212916 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbg7g\" (UniqueName: \"kubernetes.io/projected/ac64805c-4e48-4c5c-99dd-f724049c6eae-kube-api-access-nbg7g\") pod \"nova-cell0-cell-mapping-crjqq\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.213375 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-scripts\") pod \"nova-cell0-cell-mapping-crjqq\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.213585 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-config-data\") pod \"nova-cell0-cell-mapping-crjqq\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.213640 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-crjqq\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.221960 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-crjqq\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.224197 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-config-data\") pod \"nova-cell0-cell-mapping-crjqq\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.241803 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-scripts\") pod \"nova-cell0-cell-mapping-crjqq\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.241977 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.268411 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbg7g\" (UniqueName: \"kubernetes.io/projected/ac64805c-4e48-4c5c-99dd-f724049c6eae-kube-api-access-nbg7g\") pod \"nova-cell0-cell-mapping-crjqq\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " 
pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.317671 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.323758 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ph54k\" (UniqueName: \"kubernetes.io/projected/fca12d42-7ad3-415b-b1be-4739a6b035a3-kube-api-access-ph54k\") pod \"nova-api-0\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " pod="openstack/nova-api-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.324117 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fca12d42-7ad3-415b-b1be-4739a6b035a3-logs\") pod \"nova-api-0\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " pod="openstack/nova-api-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.324163 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fca12d42-7ad3-415b-b1be-4739a6b035a3-config-data\") pod \"nova-api-0\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " pod="openstack/nova-api-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.324202 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fca12d42-7ad3-415b-b1be-4739a6b035a3-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " pod="openstack/nova-api-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.351172 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.365542 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.399884 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.407592 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.425359 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fca12d42-7ad3-415b-b1be-4739a6b035a3-config-data\") pod \"nova-api-0\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " pod="openstack/nova-api-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.425407 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fca12d42-7ad3-415b-b1be-4739a6b035a3-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " pod="openstack/nova-api-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.425446 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ph54k\" (UniqueName: \"kubernetes.io/projected/fca12d42-7ad3-415b-b1be-4739a6b035a3-kube-api-access-ph54k\") pod \"nova-api-0\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " pod="openstack/nova-api-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.425469 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-logs\") pod \"nova-metadata-0\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " pod="openstack/nova-metadata-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.425503 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5tbv\" (UniqueName: \"kubernetes.io/projected/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-kube-api-access-z5tbv\") pod \"nova-metadata-0\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " pod="openstack/nova-metadata-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.425558 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-config-data\") pod \"nova-metadata-0\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " pod="openstack/nova-metadata-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.425634 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " pod="openstack/nova-metadata-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.425652 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fca12d42-7ad3-415b-b1be-4739a6b035a3-logs\") pod \"nova-api-0\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " pod="openstack/nova-api-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.426045 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fca12d42-7ad3-415b-b1be-4739a6b035a3-logs\") pod \"nova-api-0\" (UID: 
\"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " pod="openstack/nova-api-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.440437 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fca12d42-7ad3-415b-b1be-4739a6b035a3-config-data\") pod \"nova-api-0\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " pod="openstack/nova-api-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.458655 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.460061 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.464856 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fca12d42-7ad3-415b-b1be-4739a6b035a3-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " pod="openstack/nova-api-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.466808 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.497447 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ph54k\" (UniqueName: \"kubernetes.io/projected/fca12d42-7ad3-415b-b1be-4739a6b035a3-kube-api-access-ph54k\") pod \"nova-api-0\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " pod="openstack/nova-api-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.516017 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.528824 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-config-data\") pod \"nova-metadata-0\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " pod="openstack/nova-metadata-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.528906 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lc7fl\" (UniqueName: \"kubernetes.io/projected/889868e6-7e39-4413-9193-4b222c14efbd-kube-api-access-lc7fl\") pod \"nova-cell1-novncproxy-0\" (UID: \"889868e6-7e39-4413-9193-4b222c14efbd\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.528952 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " pod="openstack/nova-metadata-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.528993 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889868e6-7e39-4413-9193-4b222c14efbd-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"889868e6-7e39-4413-9193-4b222c14efbd\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.529076 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-logs\") 
pod \"nova-metadata-0\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " pod="openstack/nova-metadata-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.529127 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5tbv\" (UniqueName: \"kubernetes.io/projected/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-kube-api-access-z5tbv\") pod \"nova-metadata-0\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " pod="openstack/nova-metadata-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.529147 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889868e6-7e39-4413-9193-4b222c14efbd-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"889868e6-7e39-4413-9193-4b222c14efbd\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.532943 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-logs\") pod \"nova-metadata-0\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " pod="openstack/nova-metadata-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.534448 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-config-data\") pod \"nova-metadata-0\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " pod="openstack/nova-metadata-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.543384 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " pod="openstack/nova-metadata-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.554034 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.598545 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5tbv\" (UniqueName: \"kubernetes.io/projected/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-kube-api-access-z5tbv\") pod \"nova-metadata-0\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " pod="openstack/nova-metadata-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.598659 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-5h2vz"] Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.600445 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.629788 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.629881 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lc7fl\" (UniqueName: \"kubernetes.io/projected/889868e6-7e39-4413-9193-4b222c14efbd-kube-api-access-lc7fl\") pod \"nova-cell1-novncproxy-0\" (UID: \"889868e6-7e39-4413-9193-4b222c14efbd\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.629905 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-config\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.629938 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-dns-svc\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.629981 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889868e6-7e39-4413-9193-4b222c14efbd-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"889868e6-7e39-4413-9193-4b222c14efbd\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.630028 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfbgw\" (UniqueName: \"kubernetes.io/projected/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-kube-api-access-gfbgw\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.630067 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.630100 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.630151 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889868e6-7e39-4413-9193-4b222c14efbd-combined-ca-bundle\") pod 
\"nova-cell1-novncproxy-0\" (UID: \"889868e6-7e39-4413-9193-4b222c14efbd\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.641093 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.648449 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.659980 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-5h2vz"] Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.666015 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.693846 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.695276 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889868e6-7e39-4413-9193-4b222c14efbd-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"889868e6-7e39-4413-9193-4b222c14efbd\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.695824 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lc7fl\" (UniqueName: \"kubernetes.io/projected/889868e6-7e39-4413-9193-4b222c14efbd-kube-api-access-lc7fl\") pod \"nova-cell1-novncproxy-0\" (UID: \"889868e6-7e39-4413-9193-4b222c14efbd\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.697132 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889868e6-7e39-4413-9193-4b222c14efbd-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"889868e6-7e39-4413-9193-4b222c14efbd\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.735953 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfbgw\" (UniqueName: \"kubernetes.io/projected/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-kube-api-access-gfbgw\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.736037 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.736091 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.736184 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: 
\"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.736262 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-config\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.736295 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-dns-svc\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.737422 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-dns-svc\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.742796 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.743462 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.744143 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.758186 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-config\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.811283 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfbgw\" (UniqueName: \"kubernetes.io/projected/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-kube-api-access-gfbgw\") pod \"dnsmasq-dns-bccf8f775-5h2vz\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.850835 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-config-data\") pod \"nova-scheduler-0\" (UID: \"d02f5db7-cc0c-41d7-a8c2-435de32b52e7\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.851211 4954 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9qt5\" (UniqueName: \"kubernetes.io/projected/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-kube-api-access-v9qt5\") pod \"nova-scheduler-0\" (UID: \"d02f5db7-cc0c-41d7-a8c2-435de32b52e7\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.851331 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d02f5db7-cc0c-41d7-a8c2-435de32b52e7\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.864187 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.907182 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.956452 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-config-data\") pod \"nova-scheduler-0\" (UID: \"d02f5db7-cc0c-41d7-a8c2-435de32b52e7\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.956738 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9qt5\" (UniqueName: \"kubernetes.io/projected/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-kube-api-access-v9qt5\") pod \"nova-scheduler-0\" (UID: \"d02f5db7-cc0c-41d7-a8c2-435de32b52e7\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.956816 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d02f5db7-cc0c-41d7-a8c2-435de32b52e7\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.965714 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.974520 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d02f5db7-cc0c-41d7-a8c2-435de32b52e7\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.981056 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-config-data\") pod \"nova-scheduler-0\" (UID: \"d02f5db7-cc0c-41d7-a8c2-435de32b52e7\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:51 crc kubenswrapper[4954]: I1128 16:34:51.986011 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9qt5\" (UniqueName: \"kubernetes.io/projected/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-kube-api-access-v9qt5\") pod \"nova-scheduler-0\" (UID: \"d02f5db7-cc0c-41d7-a8c2-435de32b52e7\") " pod="openstack/nova-scheduler-0" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.041073 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.289785 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:34:52 crc kubenswrapper[4954]: W1128 16:34:52.347408 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfca12d42_7ad3_415b_b1be_4739a6b035a3.slice/crio-259c412342b8b5a9e9e34b9edf967dde0cfa9c8f800d64588f7c0489f6c5f6c5 WatchSource:0}: Error finding container 259c412342b8b5a9e9e34b9edf967dde0cfa9c8f800d64588f7c0489f6c5f6c5: Status 404 returned error can't find the container with id 259c412342b8b5a9e9e34b9edf967dde0cfa9c8f800d64588f7c0489f6c5f6c5 Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.423009 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-crjqq"] Nov 28 16:34:52 crc kubenswrapper[4954]: W1128 16:34:52.464480 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac64805c_4e48_4c5c_99dd_f724049c6eae.slice/crio-b513bc2a3224e460e54abf0929b6ec3026a4d85f2c7154e14611eaa608d47045 WatchSource:0}: Error finding container b513bc2a3224e460e54abf0929b6ec3026a4d85f2c7154e14611eaa608d47045: Status 404 returned error can't find the container with id b513bc2a3224e460e54abf0929b6ec3026a4d85f2c7154e14611eaa608d47045 Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.543019 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-pbz9g"] Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.544546 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.556998 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-pbz9g"] Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.577002 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.578144 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.675633 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-config-data\") pod \"nova-cell1-conductor-db-sync-pbz9g\" (UID: \"e15090c7-7e49-4a9e-bad2-65482065e048\") " pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.675946 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-pbz9g\" (UID: \"e15090c7-7e49-4a9e-bad2-65482065e048\") " pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.675982 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zzxm\" (UniqueName: \"kubernetes.io/projected/e15090c7-7e49-4a9e-bad2-65482065e048-kube-api-access-2zzxm\") pod \"nova-cell1-conductor-db-sync-pbz9g\" (UID: \"e15090c7-7e49-4a9e-bad2-65482065e048\") " 
pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.676046 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-scripts\") pod \"nova-cell1-conductor-db-sync-pbz9g\" (UID: \"e15090c7-7e49-4a9e-bad2-65482065e048\") " pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.777611 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-config-data\") pod \"nova-cell1-conductor-db-sync-pbz9g\" (UID: \"e15090c7-7e49-4a9e-bad2-65482065e048\") " pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.777902 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-pbz9g\" (UID: \"e15090c7-7e49-4a9e-bad2-65482065e048\") " pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.778200 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zzxm\" (UniqueName: \"kubernetes.io/projected/e15090c7-7e49-4a9e-bad2-65482065e048-kube-api-access-2zzxm\") pod \"nova-cell1-conductor-db-sync-pbz9g\" (UID: \"e15090c7-7e49-4a9e-bad2-65482065e048\") " pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.778327 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-scripts\") pod \"nova-cell1-conductor-db-sync-pbz9g\" (UID: \"e15090c7-7e49-4a9e-bad2-65482065e048\") " pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.784303 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-scripts\") pod \"nova-cell1-conductor-db-sync-pbz9g\" (UID: \"e15090c7-7e49-4a9e-bad2-65482065e048\") " pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.785803 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-pbz9g\" (UID: \"e15090c7-7e49-4a9e-bad2-65482065e048\") " pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.789318 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-config-data\") pod \"nova-cell1-conductor-db-sync-pbz9g\" (UID: \"e15090c7-7e49-4a9e-bad2-65482065e048\") " pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.803207 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zzxm\" (UniqueName: \"kubernetes.io/projected/e15090c7-7e49-4a9e-bad2-65482065e048-kube-api-access-2zzxm\") pod \"nova-cell1-conductor-db-sync-pbz9g\" (UID: 
\"e15090c7-7e49-4a9e-bad2-65482065e048\") " pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.854651 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:34:52 crc kubenswrapper[4954]: W1128 16:34:52.901914 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b8d56fa_8052_436b_8163_c8d8c44a4b8d.slice/crio-51a0342588131d8a74678e1ef8290438ab6ab39bcb25b5c4022925e40acf2e2d WatchSource:0}: Error finding container 51a0342588131d8a74678e1ef8290438ab6ab39bcb25b5c4022925e40acf2e2d: Status 404 returned error can't find the container with id 51a0342588131d8a74678e1ef8290438ab6ab39bcb25b5c4022925e40acf2e2d Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.906008 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.917089 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:34:52 crc kubenswrapper[4954]: I1128 16:34:52.928935 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-5h2vz"] Nov 28 16:34:53 crc kubenswrapper[4954]: I1128 16:34:53.042239 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:34:53 crc kubenswrapper[4954]: I1128 16:34:53.215504 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" event={"ID":"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed","Type":"ContainerStarted","Data":"412cee30518e9bdc6e65fabc00ac096214e3384f594333d4cc75e745ef35a3e6"} Nov 28 16:34:53 crc kubenswrapper[4954]: I1128 16:34:53.219947 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fca12d42-7ad3-415b-b1be-4739a6b035a3","Type":"ContainerStarted","Data":"259c412342b8b5a9e9e34b9edf967dde0cfa9c8f800d64588f7c0489f6c5f6c5"} Nov 28 16:34:53 crc kubenswrapper[4954]: I1128 16:34:53.230457 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-crjqq" event={"ID":"ac64805c-4e48-4c5c-99dd-f724049c6eae","Type":"ContainerStarted","Data":"314adb3fa6d873c20f516e93149e8e59ed337d97dff41180c073d8bff231d8d9"} Nov 28 16:34:53 crc kubenswrapper[4954]: I1128 16:34:53.230499 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-crjqq" event={"ID":"ac64805c-4e48-4c5c-99dd-f724049c6eae","Type":"ContainerStarted","Data":"b513bc2a3224e460e54abf0929b6ec3026a4d85f2c7154e14611eaa608d47045"} Nov 28 16:34:53 crc kubenswrapper[4954]: I1128 16:34:53.231486 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"889868e6-7e39-4413-9193-4b222c14efbd","Type":"ContainerStarted","Data":"d5c65cb3d48133ad6835834df891ea27dac46147293d13afbd6bd51c0e694aeb"} Nov 28 16:34:53 crc kubenswrapper[4954]: I1128 16:34:53.233902 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b8d56fa-8052-436b-8163-c8d8c44a4b8d","Type":"ContainerStarted","Data":"51a0342588131d8a74678e1ef8290438ab6ab39bcb25b5c4022925e40acf2e2d"} Nov 28 16:34:53 crc kubenswrapper[4954]: I1128 16:34:53.234765 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"d02f5db7-cc0c-41d7-a8c2-435de32b52e7","Type":"ContainerStarted","Data":"258cdb59f9ee6ad7112106f1d555e183db378902685ab620d299c90581f1448b"} Nov 28 16:34:53 crc kubenswrapper[4954]: I1128 16:34:53.484063 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-pbz9g"] Nov 28 16:34:53 crc kubenswrapper[4954]: W1128 16:34:53.486704 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode15090c7_7e49_4a9e_bad2_65482065e048.slice/crio-ed00f68d5a20ee754e6f2b7c197bc0ab387fe579e99d73c678e80a9f7c388ac9 WatchSource:0}: Error finding container ed00f68d5a20ee754e6f2b7c197bc0ab387fe579e99d73c678e80a9f7c388ac9: Status 404 returned error can't find the container with id ed00f68d5a20ee754e6f2b7c197bc0ab387fe579e99d73c678e80a9f7c388ac9 Nov 28 16:34:54 crc kubenswrapper[4954]: I1128 16:34:54.257641 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-pbz9g" event={"ID":"e15090c7-7e49-4a9e-bad2-65482065e048","Type":"ContainerStarted","Data":"ed00f68d5a20ee754e6f2b7c197bc0ab387fe579e99d73c678e80a9f7c388ac9"} Nov 28 16:34:54 crc kubenswrapper[4954]: I1128 16:34:54.260485 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" event={"ID":"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed","Type":"ContainerStarted","Data":"d07e47f0469040d37fedb491ed123de77de8ea4c27aca19cfb9e5a922e45f496"} Nov 28 16:34:54 crc kubenswrapper[4954]: I1128 16:34:54.297884 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-crjqq" podStartSLOduration=4.297868018 podStartE2EDuration="4.297868018s" podCreationTimestamp="2025-11-28 16:34:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:54.296148463 +0000 UTC m=+1447.687817014" watchObservedRunningTime="2025-11-28 16:34:54.297868018 +0000 UTC m=+1447.689536559" Nov 28 16:34:55 crc kubenswrapper[4954]: I1128 16:34:55.277568 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-pbz9g" event={"ID":"e15090c7-7e49-4a9e-bad2-65482065e048","Type":"ContainerStarted","Data":"8c6a6aea7a9dab1c90db663a53190b3d385f2ed88fd26fba749f3549c251570f"} Nov 28 16:34:55 crc kubenswrapper[4954]: I1128 16:34:55.280189 4954 generic.go:334] "Generic (PLEG): container finished" podID="5f6d0ffb-cdea-415b-8e0d-3d93e8292eed" containerID="d07e47f0469040d37fedb491ed123de77de8ea4c27aca19cfb9e5a922e45f496" exitCode=0 Nov 28 16:34:55 crc kubenswrapper[4954]: I1128 16:34:55.280256 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" event={"ID":"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed","Type":"ContainerDied","Data":"d07e47f0469040d37fedb491ed123de77de8ea4c27aca19cfb9e5a922e45f496"} Nov 28 16:34:55 crc kubenswrapper[4954]: I1128 16:34:55.608961 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:34:55 crc kubenswrapper[4954]: I1128 16:34:55.617738 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:34:57 crc kubenswrapper[4954]: I1128 16:34:57.315763 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-pbz9g" podStartSLOduration=5.315743524 podStartE2EDuration="5.315743524s" podCreationTimestamp="2025-11-28 
16:34:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:57.31405188 +0000 UTC m=+1450.705720421" watchObservedRunningTime="2025-11-28 16:34:57.315743524 +0000 UTC m=+1450.707412225" Nov 28 16:34:58 crc kubenswrapper[4954]: I1128 16:34:58.310615 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" event={"ID":"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed","Type":"ContainerStarted","Data":"dc91b1e18bd26c3630bb8350e1cd0990f680d3418d18bfaa0bb4a46d379b20a5"} Nov 28 16:34:59 crc kubenswrapper[4954]: I1128 16:34:59.322173 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:34:59 crc kubenswrapper[4954]: I1128 16:34:59.354024 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" podStartSLOduration=8.354007854 podStartE2EDuration="8.354007854s" podCreationTimestamp="2025-11-28 16:34:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:34:59.34610451 +0000 UTC m=+1452.737773051" watchObservedRunningTime="2025-11-28 16:34:59.354007854 +0000 UTC m=+1452.745676395" Nov 28 16:35:01 crc kubenswrapper[4954]: I1128 16:35:01.358166 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b8d56fa-8052-436b-8163-c8d8c44a4b8d","Type":"ContainerStarted","Data":"9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760"} Nov 28 16:35:01 crc kubenswrapper[4954]: I1128 16:35:01.360951 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d02f5db7-cc0c-41d7-a8c2-435de32b52e7","Type":"ContainerStarted","Data":"0a424ea7e730c157f9fb1a5aff66a85d862e9baa4905ba979bac57f9c264adf3"} Nov 28 16:35:01 crc kubenswrapper[4954]: I1128 16:35:01.363976 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fca12d42-7ad3-415b-b1be-4739a6b035a3","Type":"ContainerStarted","Data":"24f751e3e2c8d7d2258b66a8a4a677d4acc17490e7914d661b0d60c02f205516"} Nov 28 16:35:01 crc kubenswrapper[4954]: I1128 16:35:01.366238 4954 generic.go:334] "Generic (PLEG): container finished" podID="ac64805c-4e48-4c5c-99dd-f724049c6eae" containerID="314adb3fa6d873c20f516e93149e8e59ed337d97dff41180c073d8bff231d8d9" exitCode=0 Nov 28 16:35:01 crc kubenswrapper[4954]: I1128 16:35:01.366314 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-crjqq" event={"ID":"ac64805c-4e48-4c5c-99dd-f724049c6eae","Type":"ContainerDied","Data":"314adb3fa6d873c20f516e93149e8e59ed337d97dff41180c073d8bff231d8d9"} Nov 28 16:35:01 crc kubenswrapper[4954]: I1128 16:35:01.367989 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"889868e6-7e39-4413-9193-4b222c14efbd","Type":"ContainerStarted","Data":"a3125dcd69b8118e16e5518e30671dc7efc580d681489be8f966dc24e6e1ba69"} Nov 28 16:35:01 crc kubenswrapper[4954]: I1128 16:35:01.368135 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="889868e6-7e39-4413-9193-4b222c14efbd" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://a3125dcd69b8118e16e5518e30671dc7efc580d681489be8f966dc24e6e1ba69" gracePeriod=30 Nov 28 16:35:01 crc kubenswrapper[4954]: I1128 
Nov 28 16:35:01 crc kubenswrapper[4954]: I1128 16:35:01.430869 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.38767404 podStartE2EDuration="10.430851018s" podCreationTimestamp="2025-11-28 16:34:51 +0000 UTC" firstStartedPulling="2025-11-28 16:34:52.859717478 +0000 UTC m=+1446.251386019" lastFinishedPulling="2025-11-28 16:35:00.902894466 +0000 UTC m=+1454.294562997" observedRunningTime="2025-11-28 16:35:01.421855798 +0000 UTC m=+1454.813524359" watchObservedRunningTime="2025-11-28 16:35:01.430851018 +0000 UTC m=+1454.822519559"
Nov 28 16:35:01 crc kubenswrapper[4954]: I1128 16:35:01.907434 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:35:02 crc kubenswrapper[4954]: I1128 16:35:02.041681 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 28 16:35:02 crc kubenswrapper[4954]: I1128 16:35:02.041746 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 28 16:35:02 crc kubenswrapper[4954]: I1128 16:35:02.086347 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 28 16:35:02 crc kubenswrapper[4954]: I1128 16:35:02.381165 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fca12d42-7ad3-415b-b1be-4739a6b035a3","Type":"ContainerStarted","Data":"b5f872f3abeb204d638458c08cdab2c3af846f6d414097df69868f6e72c5d4c3"}
Nov 28 16:35:02 crc kubenswrapper[4954]: I1128 16:35:02.383724 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b8d56fa-8052-436b-8163-c8d8c44a4b8d","Type":"ContainerStarted","Data":"a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f"}
Nov 28 16:35:02 crc kubenswrapper[4954]: I1128 16:35:02.384058 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2b8d56fa-8052-436b-8163-c8d8c44a4b8d" containerName="nova-metadata-log" containerID="cri-o://9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760" gracePeriod=30
Nov 28 16:35:02 crc kubenswrapper[4954]: I1128 16:35:02.384152 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2b8d56fa-8052-436b-8163-c8d8c44a4b8d" containerName="nova-metadata-metadata" containerID="cri-o://a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f" gracePeriod=30
Nov 28 16:35:02 crc kubenswrapper[4954]: I1128 16:35:02.427443 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.881173993 podStartE2EDuration="11.42739723s" podCreationTimestamp="2025-11-28 16:34:51 +0000 UTC" firstStartedPulling="2025-11-28 16:34:52.355059236 +0000 UTC m=+1445.746727787" lastFinishedPulling="2025-11-28 16:35:00.901282483 +0000 UTC m=+1454.292951024" observedRunningTime="2025-11-28 16:35:02.417122999 +0000 UTC m=+1455.808791550" watchObservedRunningTime="2025-11-28 16:35:02.42739723 +0000 UTC m=+1455.819065791"
lastFinishedPulling="2025-11-28 16:35:00.901282483 +0000 UTC m=+1454.292951024" observedRunningTime="2025-11-28 16:35:02.417122999 +0000 UTC m=+1455.808791550" watchObservedRunningTime="2025-11-28 16:35:02.42739723 +0000 UTC m=+1455.819065791" Nov 28 16:35:02 crc kubenswrapper[4954]: I1128 16:35:02.448471 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.449693254 podStartE2EDuration="11.448449869s" podCreationTimestamp="2025-11-28 16:34:51 +0000 UTC" firstStartedPulling="2025-11-28 16:34:52.904605705 +0000 UTC m=+1446.296274246" lastFinishedPulling="2025-11-28 16:35:00.90336231 +0000 UTC m=+1454.295030861" observedRunningTime="2025-11-28 16:35:02.442924331 +0000 UTC m=+1455.834592872" watchObservedRunningTime="2025-11-28 16:35:02.448449869 +0000 UTC m=+1455.840118410" Nov 28 16:35:02 crc kubenswrapper[4954]: I1128 16:35:02.455331 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 16:35:02 crc kubenswrapper[4954]: E1128 16:35:02.558101 4954 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b8d56fa_8052_436b_8163_c8d8c44a4b8d.slice/crio-9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760.scope\": RecentStats: unable to find data in memory cache]" Nov 28 16:35:02 crc kubenswrapper[4954]: I1128 16:35:02.914821 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:35:02 crc kubenswrapper[4954]: I1128 16:35:02.999911 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-config-data\") pod \"ac64805c-4e48-4c5c-99dd-f724049c6eae\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.000246 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nbg7g\" (UniqueName: \"kubernetes.io/projected/ac64805c-4e48-4c5c-99dd-f724049c6eae-kube-api-access-nbg7g\") pod \"ac64805c-4e48-4c5c-99dd-f724049c6eae\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.002415 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-combined-ca-bundle\") pod \"ac64805c-4e48-4c5c-99dd-f724049c6eae\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.002766 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-scripts\") pod \"ac64805c-4e48-4c5c-99dd-f724049c6eae\" (UID: \"ac64805c-4e48-4c5c-99dd-f724049c6eae\") " Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.011672 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac64805c-4e48-4c5c-99dd-f724049c6eae-kube-api-access-nbg7g" (OuterVolumeSpecName: "kube-api-access-nbg7g") pod "ac64805c-4e48-4c5c-99dd-f724049c6eae" (UID: "ac64805c-4e48-4c5c-99dd-f724049c6eae"). InnerVolumeSpecName "kube-api-access-nbg7g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.028151 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-scripts" (OuterVolumeSpecName: "scripts") pod "ac64805c-4e48-4c5c-99dd-f724049c6eae" (UID: "ac64805c-4e48-4c5c-99dd-f724049c6eae"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.044736 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-config-data" (OuterVolumeSpecName: "config-data") pod "ac64805c-4e48-4c5c-99dd-f724049c6eae" (UID: "ac64805c-4e48-4c5c-99dd-f724049c6eae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.051031 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ac64805c-4e48-4c5c-99dd-f724049c6eae" (UID: "ac64805c-4e48-4c5c-99dd-f724049c6eae"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.106899 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.106929 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.106940 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nbg7g\" (UniqueName: \"kubernetes.io/projected/ac64805c-4e48-4c5c-99dd-f724049c6eae-kube-api-access-nbg7g\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.107099 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac64805c-4e48-4c5c-99dd-f724049c6eae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.141031 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.309676 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-combined-ca-bundle\") pod \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.309731 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-logs\") pod \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.309821 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5tbv\" (UniqueName: \"kubernetes.io/projected/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-kube-api-access-z5tbv\") pod \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.309902 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-config-data\") pod \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\" (UID: \"2b8d56fa-8052-436b-8163-c8d8c44a4b8d\") " Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.310288 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-logs" (OuterVolumeSpecName: "logs") pod "2b8d56fa-8052-436b-8163-c8d8c44a4b8d" (UID: "2b8d56fa-8052-436b-8163-c8d8c44a4b8d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.319791 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-kube-api-access-z5tbv" (OuterVolumeSpecName: "kube-api-access-z5tbv") pod "2b8d56fa-8052-436b-8163-c8d8c44a4b8d" (UID: "2b8d56fa-8052-436b-8163-c8d8c44a4b8d"). InnerVolumeSpecName "kube-api-access-z5tbv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.338965 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-config-data" (OuterVolumeSpecName: "config-data") pod "2b8d56fa-8052-436b-8163-c8d8c44a4b8d" (UID: "2b8d56fa-8052-436b-8163-c8d8c44a4b8d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.348903 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2b8d56fa-8052-436b-8163-c8d8c44a4b8d" (UID: "2b8d56fa-8052-436b-8163-c8d8c44a4b8d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.393767 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-crjqq" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.393773 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-crjqq" event={"ID":"ac64805c-4e48-4c5c-99dd-f724049c6eae","Type":"ContainerDied","Data":"b513bc2a3224e460e54abf0929b6ec3026a4d85f2c7154e14611eaa608d47045"} Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.393832 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b513bc2a3224e460e54abf0929b6ec3026a4d85f2c7154e14611eaa608d47045" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.395911 4954 generic.go:334] "Generic (PLEG): container finished" podID="2b8d56fa-8052-436b-8163-c8d8c44a4b8d" containerID="a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f" exitCode=0 Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.395956 4954 generic.go:334] "Generic (PLEG): container finished" podID="2b8d56fa-8052-436b-8163-c8d8c44a4b8d" containerID="9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760" exitCode=143 Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.395999 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.396022 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b8d56fa-8052-436b-8163-c8d8c44a4b8d","Type":"ContainerDied","Data":"a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f"} Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.396085 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b8d56fa-8052-436b-8163-c8d8c44a4b8d","Type":"ContainerDied","Data":"9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760"} Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.396103 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b8d56fa-8052-436b-8163-c8d8c44a4b8d","Type":"ContainerDied","Data":"51a0342588131d8a74678e1ef8290438ab6ab39bcb25b5c4022925e40acf2e2d"} Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.396125 4954 scope.go:117] "RemoveContainer" containerID="a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.413481 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.413518 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.413712 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.413721 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5tbv\" (UniqueName: \"kubernetes.io/projected/2b8d56fa-8052-436b-8163-c8d8c44a4b8d-kube-api-access-z5tbv\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.435045 4954 scope.go:117] "RemoveContainer" 
containerID="9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.457466 4954 scope.go:117] "RemoveContainer" containerID="a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f" Nov 28 16:35:03 crc kubenswrapper[4954]: E1128 16:35:03.457918 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f\": container with ID starting with a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f not found: ID does not exist" containerID="a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.457946 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f"} err="failed to get container status \"a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f\": rpc error: code = NotFound desc = could not find container \"a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f\": container with ID starting with a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f not found: ID does not exist" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.457965 4954 scope.go:117] "RemoveContainer" containerID="9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.460438 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:03 crc kubenswrapper[4954]: E1128 16:35:03.460763 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760\": container with ID starting with 9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760 not found: ID does not exist" containerID="9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.460788 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760"} err="failed to get container status \"9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760\": rpc error: code = NotFound desc = could not find container \"9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760\": container with ID starting with 9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760 not found: ID does not exist" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.460802 4954 scope.go:117] "RemoveContainer" containerID="a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.461054 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f"} err="failed to get container status \"a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f\": rpc error: code = NotFound desc = could not find container \"a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f\": container with ID starting with a92fe18643235d89063b5d0a29bee83683c1094f89b53c432468f8f2820fd22f not found: ID does not exist" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.461075 4954 
scope.go:117] "RemoveContainer" containerID="9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.461456 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760"} err="failed to get container status \"9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760\": rpc error: code = NotFound desc = could not find container \"9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760\": container with ID starting with 9a60648b95b77d7641a7204a6a9163c75b5946c96289d5e19a0d112ec283a760 not found: ID does not exist" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.471897 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.490552 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:03 crc kubenswrapper[4954]: E1128 16:35:03.493857 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac64805c-4e48-4c5c-99dd-f724049c6eae" containerName="nova-manage" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.493888 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac64805c-4e48-4c5c-99dd-f724049c6eae" containerName="nova-manage" Nov 28 16:35:03 crc kubenswrapper[4954]: E1128 16:35:03.493952 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b8d56fa-8052-436b-8163-c8d8c44a4b8d" containerName="nova-metadata-log" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.493960 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b8d56fa-8052-436b-8163-c8d8c44a4b8d" containerName="nova-metadata-log" Nov 28 16:35:03 crc kubenswrapper[4954]: E1128 16:35:03.494000 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b8d56fa-8052-436b-8163-c8d8c44a4b8d" containerName="nova-metadata-metadata" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.494008 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b8d56fa-8052-436b-8163-c8d8c44a4b8d" containerName="nova-metadata-metadata" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.494710 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b8d56fa-8052-436b-8163-c8d8c44a4b8d" containerName="nova-metadata-log" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.494737 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac64805c-4e48-4c5c-99dd-f724049c6eae" containerName="nova-manage" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.494762 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b8d56fa-8052-436b-8163-c8d8c44a4b8d" containerName="nova-metadata-metadata" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.496480 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.503752 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.505466 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.622942 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.644142 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.660484 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61a57031-5eda-40a0-9bbf-5118475d6054-logs\") pod \"nova-metadata-0\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.660828 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.660927 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.661007 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kd4sq\" (UniqueName: \"kubernetes.io/projected/61a57031-5eda-40a0-9bbf-5118475d6054-kube-api-access-kd4sq\") pod \"nova-metadata-0\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.661166 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-config-data\") pod \"nova-metadata-0\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.704523 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.739587 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:03 crc kubenswrapper[4954]: E1128 16:35:03.740428 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle config-data kube-api-access-kd4sq logs nova-metadata-tls-certs], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/nova-metadata-0" podUID="61a57031-5eda-40a0-9bbf-5118475d6054" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.765253 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.765940 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.765976 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kd4sq\" (UniqueName: \"kubernetes.io/projected/61a57031-5eda-40a0-9bbf-5118475d6054-kube-api-access-kd4sq\") pod \"nova-metadata-0\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.766067 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-config-data\") pod \"nova-metadata-0\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.766177 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61a57031-5eda-40a0-9bbf-5118475d6054-logs\") pod \"nova-metadata-0\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.766568 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61a57031-5eda-40a0-9bbf-5118475d6054-logs\") pod \"nova-metadata-0\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.770039 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.770973 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.775064 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-config-data\") pod \"nova-metadata-0\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.786103 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kd4sq\" (UniqueName: \"kubernetes.io/projected/61a57031-5eda-40a0-9bbf-5118475d6054-kube-api-access-kd4sq\") pod \"nova-metadata-0\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " pod="openstack/nova-metadata-0" Nov 28 16:35:03 crc kubenswrapper[4954]: I1128 16:35:03.870682 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="2b8d56fa-8052-436b-8163-c8d8c44a4b8d" path="/var/lib/kubelet/pods/2b8d56fa-8052-436b-8163-c8d8c44a4b8d/volumes" Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.408172 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.408316 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="fca12d42-7ad3-415b-b1be-4739a6b035a3" containerName="nova-api-log" containerID="cri-o://24f751e3e2c8d7d2258b66a8a4a677d4acc17490e7914d661b0d60c02f205516" gracePeriod=30 Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.408432 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="fca12d42-7ad3-415b-b1be-4739a6b035a3" containerName="nova-api-api" containerID="cri-o://b5f872f3abeb204d638458c08cdab2c3af846f6d414097df69868f6e72c5d4c3" gracePeriod=30 Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.424319 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.580240 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-combined-ca-bundle\") pod \"61a57031-5eda-40a0-9bbf-5118475d6054\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.580284 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kd4sq\" (UniqueName: \"kubernetes.io/projected/61a57031-5eda-40a0-9bbf-5118475d6054-kube-api-access-kd4sq\") pod \"61a57031-5eda-40a0-9bbf-5118475d6054\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.580354 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-config-data\") pod \"61a57031-5eda-40a0-9bbf-5118475d6054\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.580372 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61a57031-5eda-40a0-9bbf-5118475d6054-logs\") pod \"61a57031-5eda-40a0-9bbf-5118475d6054\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.580416 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-nova-metadata-tls-certs\") pod \"61a57031-5eda-40a0-9bbf-5118475d6054\" (UID: \"61a57031-5eda-40a0-9bbf-5118475d6054\") " Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.580690 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61a57031-5eda-40a0-9bbf-5118475d6054-logs" (OuterVolumeSpecName: "logs") pod "61a57031-5eda-40a0-9bbf-5118475d6054" (UID: "61a57031-5eda-40a0-9bbf-5118475d6054"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.599124 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "61a57031-5eda-40a0-9bbf-5118475d6054" (UID: "61a57031-5eda-40a0-9bbf-5118475d6054"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.614187 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-config-data" (OuterVolumeSpecName: "config-data") pod "61a57031-5eda-40a0-9bbf-5118475d6054" (UID: "61a57031-5eda-40a0-9bbf-5118475d6054"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.614249 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61a57031-5eda-40a0-9bbf-5118475d6054-kube-api-access-kd4sq" (OuterVolumeSpecName: "kube-api-access-kd4sq") pod "61a57031-5eda-40a0-9bbf-5118475d6054" (UID: "61a57031-5eda-40a0-9bbf-5118475d6054"). InnerVolumeSpecName "kube-api-access-kd4sq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.614429 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "61a57031-5eda-40a0-9bbf-5118475d6054" (UID: "61a57031-5eda-40a0-9bbf-5118475d6054"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.682302 4954 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.682333 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.682342 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kd4sq\" (UniqueName: \"kubernetes.io/projected/61a57031-5eda-40a0-9bbf-5118475d6054-kube-api-access-kd4sq\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.682352 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61a57031-5eda-40a0-9bbf-5118475d6054-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:04 crc kubenswrapper[4954]: I1128 16:35:04.682360 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61a57031-5eda-40a0-9bbf-5118475d6054-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.422424 4954 generic.go:334] "Generic (PLEG): container finished" podID="fca12d42-7ad3-415b-b1be-4739a6b035a3" containerID="b5f872f3abeb204d638458c08cdab2c3af846f6d414097df69868f6e72c5d4c3" exitCode=0 Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.422746 4954 generic.go:334] "Generic (PLEG): container finished" podID="fca12d42-7ad3-415b-b1be-4739a6b035a3" containerID="24f751e3e2c8d7d2258b66a8a4a677d4acc17490e7914d661b0d60c02f205516" exitCode=143 Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.422636 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fca12d42-7ad3-415b-b1be-4739a6b035a3","Type":"ContainerDied","Data":"b5f872f3abeb204d638458c08cdab2c3af846f6d414097df69868f6e72c5d4c3"} Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.422852 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fca12d42-7ad3-415b-b1be-4739a6b035a3","Type":"ContainerDied","Data":"24f751e3e2c8d7d2258b66a8a4a677d4acc17490e7914d661b0d60c02f205516"} Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.422910 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="d02f5db7-cc0c-41d7-a8c2-435de32b52e7" containerName="nova-scheduler-scheduler" containerID="cri-o://0a424ea7e730c157f9fb1a5aff66a85d862e9baa4905ba979bac57f9c264adf3" gracePeriod=30 Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.422947 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.498168 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.506994 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.527021 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.529000 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.530977 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.531074 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.542595 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.705013 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.705063 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.705094 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-config-data\") pod \"nova-metadata-0\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.705114 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24190ac6-aa40-4fcf-8722-f86426458655-logs\") pod \"nova-metadata-0\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.705160 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkqzr\" (UniqueName: \"kubernetes.io/projected/24190ac6-aa40-4fcf-8722-f86426458655-kube-api-access-pkqzr\") pod \"nova-metadata-0\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.806960 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.807005 4954 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.807028 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-config-data\") pod \"nova-metadata-0\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.807046 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24190ac6-aa40-4fcf-8722-f86426458655-logs\") pod \"nova-metadata-0\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.807090 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkqzr\" (UniqueName: \"kubernetes.io/projected/24190ac6-aa40-4fcf-8722-f86426458655-kube-api-access-pkqzr\") pod \"nova-metadata-0\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.807787 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24190ac6-aa40-4fcf-8722-f86426458655-logs\") pod \"nova-metadata-0\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.812056 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-config-data\") pod \"nova-metadata-0\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.812908 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.819506 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.824967 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkqzr\" (UniqueName: \"kubernetes.io/projected/24190ac6-aa40-4fcf-8722-f86426458655-kube-api-access-pkqzr\") pod \"nova-metadata-0\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.856044 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:35:05 crc kubenswrapper[4954]: I1128 16:35:05.869572 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61a57031-5eda-40a0-9bbf-5118475d6054" path="/var/lib/kubelet/pods/61a57031-5eda-40a0-9bbf-5118475d6054/volumes" Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.310313 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.441859 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fca12d42-7ad3-415b-b1be-4739a6b035a3","Type":"ContainerDied","Data":"259c412342b8b5a9e9e34b9edf967dde0cfa9c8f800d64588f7c0489f6c5f6c5"} Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.441914 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="259c412342b8b5a9e9e34b9edf967dde0cfa9c8f800d64588f7c0489f6c5f6c5" Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.444236 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"24190ac6-aa40-4fcf-8722-f86426458655","Type":"ContainerStarted","Data":"2fdbf8ebd7a580d447e6cac86a33a27326a4bb797380b2021583483db4e6ebbc"} Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.455176 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.521174 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fca12d42-7ad3-415b-b1be-4739a6b035a3-config-data\") pod \"fca12d42-7ad3-415b-b1be-4739a6b035a3\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.521246 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fca12d42-7ad3-415b-b1be-4739a6b035a3-logs\") pod \"fca12d42-7ad3-415b-b1be-4739a6b035a3\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.521357 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ph54k\" (UniqueName: \"kubernetes.io/projected/fca12d42-7ad3-415b-b1be-4739a6b035a3-kube-api-access-ph54k\") pod \"fca12d42-7ad3-415b-b1be-4739a6b035a3\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.521497 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fca12d42-7ad3-415b-b1be-4739a6b035a3-combined-ca-bundle\") pod \"fca12d42-7ad3-415b-b1be-4739a6b035a3\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.521731 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fca12d42-7ad3-415b-b1be-4739a6b035a3-logs" (OuterVolumeSpecName: "logs") pod "fca12d42-7ad3-415b-b1be-4739a6b035a3" (UID: "fca12d42-7ad3-415b-b1be-4739a6b035a3"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.522190 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fca12d42-7ad3-415b-b1be-4739a6b035a3-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.525717 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fca12d42-7ad3-415b-b1be-4739a6b035a3-kube-api-access-ph54k" (OuterVolumeSpecName: "kube-api-access-ph54k") pod "fca12d42-7ad3-415b-b1be-4739a6b035a3" (UID: "fca12d42-7ad3-415b-b1be-4739a6b035a3"). InnerVolumeSpecName "kube-api-access-ph54k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:06 crc kubenswrapper[4954]: E1128 16:35:06.543578 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fca12d42-7ad3-415b-b1be-4739a6b035a3-config-data podName:fca12d42-7ad3-415b-b1be-4739a6b035a3 nodeName:}" failed. No retries permitted until 2025-11-28 16:35:07.043510387 +0000 UTC m=+1460.435178958 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config-data" (UniqueName: "kubernetes.io/secret/fca12d42-7ad3-415b-b1be-4739a6b035a3-config-data") pod "fca12d42-7ad3-415b-b1be-4739a6b035a3" (UID: "fca12d42-7ad3-415b-b1be-4739a6b035a3") : error deleting /var/lib/kubelet/pods/fca12d42-7ad3-415b-b1be-4739a6b035a3/volume-subpaths: remove /var/lib/kubelet/pods/fca12d42-7ad3-415b-b1be-4739a6b035a3/volume-subpaths: no such file or directory Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.545733 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fca12d42-7ad3-415b-b1be-4739a6b035a3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fca12d42-7ad3-415b-b1be-4739a6b035a3" (UID: "fca12d42-7ad3-415b-b1be-4739a6b035a3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.625514 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ph54k\" (UniqueName: \"kubernetes.io/projected/fca12d42-7ad3-415b-b1be-4739a6b035a3-kube-api-access-ph54k\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.625568 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fca12d42-7ad3-415b-b1be-4739a6b035a3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:06 crc kubenswrapper[4954]: I1128 16:35:06.967726 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.040371 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-tjlhh"] Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.040643 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" podUID="baee5634-1827-46e1-884d-6ea76415535e" containerName="dnsmasq-dns" containerID="cri-o://5798abdd42e2387bd7b208030510f19c5d0e913d5d27b7c3d3569033adf96da3" gracePeriod=10 Nov 28 16:35:07 crc kubenswrapper[4954]: E1128 16:35:07.047830 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0a424ea7e730c157f9fb1a5aff66a85d862e9baa4905ba979bac57f9c264adf3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:35:07 crc kubenswrapper[4954]: E1128 16:35:07.054544 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0a424ea7e730c157f9fb1a5aff66a85d862e9baa4905ba979bac57f9c264adf3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:35:07 crc kubenswrapper[4954]: E1128 16:35:07.057310 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0a424ea7e730c157f9fb1a5aff66a85d862e9baa4905ba979bac57f9c264adf3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:35:07 crc kubenswrapper[4954]: E1128 16:35:07.057374 4954 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="d02f5db7-cc0c-41d7-a8c2-435de32b52e7" containerName="nova-scheduler-scheduler" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.139368 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fca12d42-7ad3-415b-b1be-4739a6b035a3-config-data\") pod \"fca12d42-7ad3-415b-b1be-4739a6b035a3\" (UID: \"fca12d42-7ad3-415b-b1be-4739a6b035a3\") " Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.143591 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fca12d42-7ad3-415b-b1be-4739a6b035a3-config-data" (OuterVolumeSpecName: "config-data") pod "fca12d42-7ad3-415b-b1be-4739a6b035a3" (UID: "fca12d42-7ad3-415b-b1be-4739a6b035a3"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.242248 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fca12d42-7ad3-415b-b1be-4739a6b035a3-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.455194 4954 generic.go:334] "Generic (PLEG): container finished" podID="baee5634-1827-46e1-884d-6ea76415535e" containerID="5798abdd42e2387bd7b208030510f19c5d0e913d5d27b7c3d3569033adf96da3" exitCode=0 Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.455259 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" event={"ID":"baee5634-1827-46e1-884d-6ea76415535e","Type":"ContainerDied","Data":"5798abdd42e2387bd7b208030510f19c5d0e913d5d27b7c3d3569033adf96da3"} Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.459035 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"24190ac6-aa40-4fcf-8722-f86426458655","Type":"ContainerStarted","Data":"53ef55a7695eb41cb8d3fc7b1d0ab0da9f56289fa26581dd8c718bc8e22a24b0"} Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.459071 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"24190ac6-aa40-4fcf-8722-f86426458655","Type":"ContainerStarted","Data":"9fa483e2242b4a74d8a2ac17f44066422f609f97c99ae94d124d88ea13944ec8"} Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.459089 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.497590 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.505828 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.521559 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:07 crc kubenswrapper[4954]: E1128 16:35:07.522089 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fca12d42-7ad3-415b-b1be-4739a6b035a3" containerName="nova-api-log" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.522111 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="fca12d42-7ad3-415b-b1be-4739a6b035a3" containerName="nova-api-log" Nov 28 16:35:07 crc kubenswrapper[4954]: E1128 16:35:07.522145 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fca12d42-7ad3-415b-b1be-4739a6b035a3" containerName="nova-api-api" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.522155 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="fca12d42-7ad3-415b-b1be-4739a6b035a3" containerName="nova-api-api" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.522383 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="fca12d42-7ad3-415b-b1be-4739a6b035a3" containerName="nova-api-log" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.522417 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="fca12d42-7ad3-415b-b1be-4739a6b035a3" containerName="nova-api-api" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.523649 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.526051 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.533101 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.547589 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jv4w\" (UniqueName: \"kubernetes.io/projected/b3314fcc-d027-4e35-8739-b7f590c0cfb9-kube-api-access-6jv4w\") pod \"nova-api-0\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " pod="openstack/nova-api-0" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.547669 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3314fcc-d027-4e35-8739-b7f590c0cfb9-logs\") pod \"nova-api-0\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " pod="openstack/nova-api-0" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.547714 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3314fcc-d027-4e35-8739-b7f590c0cfb9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " pod="openstack/nova-api-0" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.547741 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3314fcc-d027-4e35-8739-b7f590c0cfb9-config-data\") pod \"nova-api-0\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " pod="openstack/nova-api-0" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.649191 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jv4w\" (UniqueName: \"kubernetes.io/projected/b3314fcc-d027-4e35-8739-b7f590c0cfb9-kube-api-access-6jv4w\") pod \"nova-api-0\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " pod="openstack/nova-api-0" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.649286 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3314fcc-d027-4e35-8739-b7f590c0cfb9-logs\") pod \"nova-api-0\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " pod="openstack/nova-api-0" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.649338 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3314fcc-d027-4e35-8739-b7f590c0cfb9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " pod="openstack/nova-api-0" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.649363 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3314fcc-d027-4e35-8739-b7f590c0cfb9-config-data\") pod \"nova-api-0\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " pod="openstack/nova-api-0" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.650277 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3314fcc-d027-4e35-8739-b7f590c0cfb9-logs\") pod \"nova-api-0\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " 
pod="openstack/nova-api-0" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.655302 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3314fcc-d027-4e35-8739-b7f590c0cfb9-config-data\") pod \"nova-api-0\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " pod="openstack/nova-api-0" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.656960 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3314fcc-d027-4e35-8739-b7f590c0cfb9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " pod="openstack/nova-api-0" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.669375 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jv4w\" (UniqueName: \"kubernetes.io/projected/b3314fcc-d027-4e35-8739-b7f590c0cfb9-kube-api-access-6jv4w\") pod \"nova-api-0\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " pod="openstack/nova-api-0" Nov 28 16:35:07 crc kubenswrapper[4954]: I1128 16:35:07.841727 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:08 crc kubenswrapper[4954]: I1128 16:35:08.174651 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fca12d42-7ad3-415b-b1be-4739a6b035a3" path="/var/lib/kubelet/pods/fca12d42-7ad3-415b-b1be-4739a6b035a3/volumes" Nov 28 16:35:08 crc kubenswrapper[4954]: I1128 16:35:08.622799 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:08 crc kubenswrapper[4954]: W1128 16:35:08.628586 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb3314fcc_d027_4e35_8739_b7f590c0cfb9.slice/crio-5a0ae0b4e7116cc59c4f09f6de952d0d914401bcbed00b3ba2189c326666c7e4 WatchSource:0}: Error finding container 5a0ae0b4e7116cc59c4f09f6de952d0d914401bcbed00b3ba2189c326666c7e4: Status 404 returned error can't find the container with id 5a0ae0b4e7116cc59c4f09f6de952d0d914401bcbed00b3ba2189c326666c7e4 Nov 28 16:35:09 crc kubenswrapper[4954]: I1128 16:35:09.479036 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b3314fcc-d027-4e35-8739-b7f590c0cfb9","Type":"ContainerStarted","Data":"5a0ae0b4e7116cc59c4f09f6de952d0d914401bcbed00b3ba2189c326666c7e4"} Nov 28 16:35:09 crc kubenswrapper[4954]: I1128 16:35:09.501886 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.501867804 podStartE2EDuration="4.501867804s" podCreationTimestamp="2025-11-28 16:35:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:35:09.496752799 +0000 UTC m=+1462.888421340" watchObservedRunningTime="2025-11-28 16:35:09.501867804 +0000 UTC m=+1462.893536345" Nov 28 16:35:10 crc kubenswrapper[4954]: I1128 16:35:10.856790 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:35:10 crc kubenswrapper[4954]: I1128 16:35:10.857138 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.065007 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.217501 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gcqmz\" (UniqueName: \"kubernetes.io/projected/baee5634-1827-46e1-884d-6ea76415535e-kube-api-access-gcqmz\") pod \"baee5634-1827-46e1-884d-6ea76415535e\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.217611 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-ovsdbserver-nb\") pod \"baee5634-1827-46e1-884d-6ea76415535e\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.217714 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-ovsdbserver-sb\") pod \"baee5634-1827-46e1-884d-6ea76415535e\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.217757 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-config\") pod \"baee5634-1827-46e1-884d-6ea76415535e\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.217809 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-dns-svc\") pod \"baee5634-1827-46e1-884d-6ea76415535e\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.217833 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-dns-swift-storage-0\") pod \"baee5634-1827-46e1-884d-6ea76415535e\" (UID: \"baee5634-1827-46e1-884d-6ea76415535e\") " Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.225823 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/baee5634-1827-46e1-884d-6ea76415535e-kube-api-access-gcqmz" (OuterVolumeSpecName: "kube-api-access-gcqmz") pod "baee5634-1827-46e1-884d-6ea76415535e" (UID: "baee5634-1827-46e1-884d-6ea76415535e"). InnerVolumeSpecName "kube-api-access-gcqmz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.270998 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "baee5634-1827-46e1-884d-6ea76415535e" (UID: "baee5634-1827-46e1-884d-6ea76415535e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.272890 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "baee5634-1827-46e1-884d-6ea76415535e" (UID: "baee5634-1827-46e1-884d-6ea76415535e"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.273336 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "baee5634-1827-46e1-884d-6ea76415535e" (UID: "baee5634-1827-46e1-884d-6ea76415535e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.275559 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-config" (OuterVolumeSpecName: "config") pod "baee5634-1827-46e1-884d-6ea76415535e" (UID: "baee5634-1827-46e1-884d-6ea76415535e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.275931 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "baee5634-1827-46e1-884d-6ea76415535e" (UID: "baee5634-1827-46e1-884d-6ea76415535e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.320116 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gcqmz\" (UniqueName: \"kubernetes.io/projected/baee5634-1827-46e1-884d-6ea76415535e-kube-api-access-gcqmz\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.320173 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.320192 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.320207 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.320220 4954 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.320230 4954 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/baee5634-1827-46e1-884d-6ea76415535e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.502698 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" event={"ID":"baee5634-1827-46e1-884d-6ea76415535e","Type":"ContainerDied","Data":"c74324e080b76734eee4fbebae3d55db44b2fe00f27416484c0e24e0f4a5b584"} Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.502751 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.502773 4954 scope.go:117] "RemoveContainer" containerID="5798abdd42e2387bd7b208030510f19c5d0e913d5d27b7c3d3569033adf96da3" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.505746 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b3314fcc-d027-4e35-8739-b7f590c0cfb9","Type":"ContainerStarted","Data":"4205f08df3d9e3e41f04d1d7b5eb3f1b08f78f9afea057c569d969d88dd59254"} Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.529745 4954 scope.go:117] "RemoveContainer" containerID="1c9e4a3c552ca4fe86935252803f477885b013536c1d4f2bc415fd6b706a9565" Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.545397 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-tjlhh"] Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.554421 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-tjlhh"] Nov 28 16:35:11 crc kubenswrapper[4954]: I1128 16:35:11.869514 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="baee5634-1827-46e1-884d-6ea76415535e" path="/var/lib/kubelet/pods/baee5634-1827-46e1-884d-6ea76415535e/volumes" Nov 28 16:35:12 crc kubenswrapper[4954]: E1128 16:35:12.045049 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0a424ea7e730c157f9fb1a5aff66a85d862e9baa4905ba979bac57f9c264adf3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:35:12 crc kubenswrapper[4954]: E1128 16:35:12.046792 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0a424ea7e730c157f9fb1a5aff66a85d862e9baa4905ba979bac57f9c264adf3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:35:12 crc kubenswrapper[4954]: E1128 16:35:12.047989 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0a424ea7e730c157f9fb1a5aff66a85d862e9baa4905ba979bac57f9c264adf3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:35:12 crc kubenswrapper[4954]: E1128 16:35:12.048035 4954 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="d02f5db7-cc0c-41d7-a8c2-435de32b52e7" containerName="nova-scheduler-scheduler" Nov 28 16:35:12 crc kubenswrapper[4954]: I1128 16:35:12.537642 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b3314fcc-d027-4e35-8739-b7f590c0cfb9","Type":"ContainerStarted","Data":"541d577d54af348454af761b14e45c18f84c95b0a7a06bc84d9d4ef605761925"} Nov 28 16:35:12 crc kubenswrapper[4954]: I1128 16:35:12.573758 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=5.573738601 podStartE2EDuration="5.573738601s" podCreationTimestamp="2025-11-28 16:35:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-28 16:35:12.567638524 +0000 UTC m=+1465.959307105" watchObservedRunningTime="2025-11-28 16:35:12.573738601 +0000 UTC m=+1465.965407152" Nov 28 16:35:13 crc kubenswrapper[4954]: I1128 16:35:13.734685 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 16:35:15 crc kubenswrapper[4954]: I1128 16:35:15.867275 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 16:35:15 crc kubenswrapper[4954]: I1128 16:35:15.867558 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 16:35:15 crc kubenswrapper[4954]: I1128 16:35:15.969177 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6578955fd5-tjlhh" podUID="baee5634-1827-46e1-884d-6ea76415535e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.161:5353: i/o timeout" Nov 28 16:35:16 crc kubenswrapper[4954]: I1128 16:35:16.872644 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="24190ac6-aa40-4fcf-8722-f86426458655" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:35:16 crc kubenswrapper[4954]: I1128 16:35:16.872788 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="24190ac6-aa40-4fcf-8722-f86426458655" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:35:17 crc kubenswrapper[4954]: E1128 16:35:17.043949 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0a424ea7e730c157f9fb1a5aff66a85d862e9baa4905ba979bac57f9c264adf3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:35:17 crc kubenswrapper[4954]: E1128 16:35:17.045772 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0a424ea7e730c157f9fb1a5aff66a85d862e9baa4905ba979bac57f9c264adf3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:35:17 crc kubenswrapper[4954]: E1128 16:35:17.047565 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="0a424ea7e730c157f9fb1a5aff66a85d862e9baa4905ba979bac57f9c264adf3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:35:17 crc kubenswrapper[4954]: E1128 16:35:17.047599 4954 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="d02f5db7-cc0c-41d7-a8c2-435de32b52e7" containerName="nova-scheduler-scheduler" Nov 28 16:35:17 crc kubenswrapper[4954]: I1128 16:35:17.510901 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:35:17 crc kubenswrapper[4954]: I1128 16:35:17.511140 4954 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/kube-state-metrics-0" podUID="dc71f947-acc3-4867-8e70-0f2def81dc03" containerName="kube-state-metrics" containerID="cri-o://26eea4df2280948534275499d9644069915f6fc0ceb14efb26ac10a1655c3f0d" gracePeriod=30 Nov 28 16:35:17 crc kubenswrapper[4954]: I1128 16:35:17.843162 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:35:17 crc kubenswrapper[4954]: I1128 16:35:17.843501 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:35:18 crc kubenswrapper[4954]: I1128 16:35:18.614991 4954 generic.go:334] "Generic (PLEG): container finished" podID="d02f5db7-cc0c-41d7-a8c2-435de32b52e7" containerID="0a424ea7e730c157f9fb1a5aff66a85d862e9baa4905ba979bac57f9c264adf3" exitCode=0 Nov 28 16:35:18 crc kubenswrapper[4954]: I1128 16:35:18.615306 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d02f5db7-cc0c-41d7-a8c2-435de32b52e7","Type":"ContainerDied","Data":"0a424ea7e730c157f9fb1a5aff66a85d862e9baa4905ba979bac57f9c264adf3"} Nov 28 16:35:18 crc kubenswrapper[4954]: I1128 16:35:18.623680 4954 generic.go:334] "Generic (PLEG): container finished" podID="dc71f947-acc3-4867-8e70-0f2def81dc03" containerID="26eea4df2280948534275499d9644069915f6fc0ceb14efb26ac10a1655c3f0d" exitCode=2 Nov 28 16:35:18 crc kubenswrapper[4954]: I1128 16:35:18.623737 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"dc71f947-acc3-4867-8e70-0f2def81dc03","Type":"ContainerDied","Data":"26eea4df2280948534275499d9644069915f6fc0ceb14efb26ac10a1655c3f0d"} Nov 28 16:35:18 crc kubenswrapper[4954]: I1128 16:35:18.841334 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:35:18 crc kubenswrapper[4954]: I1128 16:35:18.853057 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 16:35:18 crc kubenswrapper[4954]: I1128 16:35:18.925242 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b3314fcc-d027-4e35-8739-b7f590c0cfb9" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.192:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:35:18 crc kubenswrapper[4954]: I1128 16:35:18.925619 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b3314fcc-d027-4e35-8739-b7f590c0cfb9" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.192:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:35:18 crc kubenswrapper[4954]: I1128 16:35:18.970667 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-combined-ca-bundle\") pod \"d02f5db7-cc0c-41d7-a8c2-435de32b52e7\" (UID: \"d02f5db7-cc0c-41d7-a8c2-435de32b52e7\") " Nov 28 16:35:18 crc kubenswrapper[4954]: I1128 16:35:18.970772 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-config-data\") pod \"d02f5db7-cc0c-41d7-a8c2-435de32b52e7\" (UID: \"d02f5db7-cc0c-41d7-a8c2-435de32b52e7\") " Nov 28 16:35:18 crc kubenswrapper[4954]: I1128 16:35:18.970812 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7b4df\" (UniqueName: \"kubernetes.io/projected/dc71f947-acc3-4867-8e70-0f2def81dc03-kube-api-access-7b4df\") pod \"dc71f947-acc3-4867-8e70-0f2def81dc03\" (UID: \"dc71f947-acc3-4867-8e70-0f2def81dc03\") " Nov 28 16:35:18 crc kubenswrapper[4954]: I1128 16:35:18.970851 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v9qt5\" (UniqueName: \"kubernetes.io/projected/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-kube-api-access-v9qt5\") pod \"d02f5db7-cc0c-41d7-a8c2-435de32b52e7\" (UID: \"d02f5db7-cc0c-41d7-a8c2-435de32b52e7\") " Nov 28 16:35:18 crc kubenswrapper[4954]: I1128 16:35:18.977025 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-kube-api-access-v9qt5" (OuterVolumeSpecName: "kube-api-access-v9qt5") pod "d02f5db7-cc0c-41d7-a8c2-435de32b52e7" (UID: "d02f5db7-cc0c-41d7-a8c2-435de32b52e7"). InnerVolumeSpecName "kube-api-access-v9qt5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:18 crc kubenswrapper[4954]: I1128 16:35:18.993571 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc71f947-acc3-4867-8e70-0f2def81dc03-kube-api-access-7b4df" (OuterVolumeSpecName: "kube-api-access-7b4df") pod "dc71f947-acc3-4867-8e70-0f2def81dc03" (UID: "dc71f947-acc3-4867-8e70-0f2def81dc03"). InnerVolumeSpecName "kube-api-access-7b4df". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.004336 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-config-data" (OuterVolumeSpecName: "config-data") pod "d02f5db7-cc0c-41d7-a8c2-435de32b52e7" (UID: "d02f5db7-cc0c-41d7-a8c2-435de32b52e7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.009809 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d02f5db7-cc0c-41d7-a8c2-435de32b52e7" (UID: "d02f5db7-cc0c-41d7-a8c2-435de32b52e7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.073267 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.073310 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.073322 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7b4df\" (UniqueName: \"kubernetes.io/projected/dc71f947-acc3-4867-8e70-0f2def81dc03-kube-api-access-7b4df\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.073332 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v9qt5\" (UniqueName: \"kubernetes.io/projected/d02f5db7-cc0c-41d7-a8c2-435de32b52e7-kube-api-access-v9qt5\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.633723 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"dc71f947-acc3-4867-8e70-0f2def81dc03","Type":"ContainerDied","Data":"a70560ccbf5fff039847fd4be765f18e26d087a622c00f712f67b08b6abfd945"} Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.633784 4954 scope.go:117] "RemoveContainer" containerID="26eea4df2280948534275499d9644069915f6fc0ceb14efb26ac10a1655c3f0d" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.633741 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.636711 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d02f5db7-cc0c-41d7-a8c2-435de32b52e7","Type":"ContainerDied","Data":"258cdb59f9ee6ad7112106f1d555e183db378902685ab620d299c90581f1448b"} Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.636800 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.679361 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.681815 4954 scope.go:117] "RemoveContainer" containerID="0a424ea7e730c157f9fb1a5aff66a85d862e9baa4905ba979bac57f9c264adf3" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.693661 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.723670 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:35:19 crc kubenswrapper[4954]: E1128 16:35:19.724129 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d02f5db7-cc0c-41d7-a8c2-435de32b52e7" containerName="nova-scheduler-scheduler" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.724150 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d02f5db7-cc0c-41d7-a8c2-435de32b52e7" containerName="nova-scheduler-scheduler" Nov 28 16:35:19 crc kubenswrapper[4954]: E1128 16:35:19.724170 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc71f947-acc3-4867-8e70-0f2def81dc03" containerName="kube-state-metrics" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.724177 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc71f947-acc3-4867-8e70-0f2def81dc03" containerName="kube-state-metrics" Nov 28 16:35:19 crc kubenswrapper[4954]: E1128 16:35:19.724189 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baee5634-1827-46e1-884d-6ea76415535e" containerName="init" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.724195 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="baee5634-1827-46e1-884d-6ea76415535e" containerName="init" Nov 28 16:35:19 crc kubenswrapper[4954]: E1128 16:35:19.724224 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baee5634-1827-46e1-884d-6ea76415535e" containerName="dnsmasq-dns" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.724231 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="baee5634-1827-46e1-884d-6ea76415535e" containerName="dnsmasq-dns" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.724402 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc71f947-acc3-4867-8e70-0f2def81dc03" containerName="kube-state-metrics" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.724413 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="d02f5db7-cc0c-41d7-a8c2-435de32b52e7" containerName="nova-scheduler-scheduler" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.724429 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="baee5634-1827-46e1-884d-6ea76415535e" containerName="dnsmasq-dns" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.725650 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.729336 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.729629 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.739854 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.750946 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.759900 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.770865 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.772111 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.773747 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.788342 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.868183 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d02f5db7-cc0c-41d7-a8c2-435de32b52e7" path="/var/lib/kubelet/pods/d02f5db7-cc0c-41d7-a8c2-435de32b52e7/volumes" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.868816 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc71f947-acc3-4867-8e70-0f2def81dc03" path="/var/lib/kubelet/pods/dc71f947-acc3-4867-8e70-0f2def81dc03/volumes" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.895223 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " pod="openstack/kube-state-metrics-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.895288 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ld8zk\" (UniqueName: \"kubernetes.io/projected/9c381eb3-f466-40b7-a962-6782db85678c-kube-api-access-ld8zk\") pod \"kube-state-metrics-0\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " pod="openstack/kube-state-metrics-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.895438 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " pod="openstack/kube-state-metrics-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.895572 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5dwk\" (UniqueName: \"kubernetes.io/projected/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-kube-api-access-z5dwk\") pod 
\"nova-scheduler-0\" (UID: \"8eae8ee3-3bd5-4463-b936-a237fbf26d2a\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.895689 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " pod="openstack/kube-state-metrics-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.895852 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8eae8ee3-3bd5-4463-b936-a237fbf26d2a\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.896038 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-config-data\") pod \"nova-scheduler-0\" (UID: \"8eae8ee3-3bd5-4463-b936-a237fbf26d2a\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.998378 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " pod="openstack/kube-state-metrics-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.998480 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ld8zk\" (UniqueName: \"kubernetes.io/projected/9c381eb3-f466-40b7-a962-6782db85678c-kube-api-access-ld8zk\") pod \"kube-state-metrics-0\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " pod="openstack/kube-state-metrics-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.998570 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " pod="openstack/kube-state-metrics-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.998609 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5dwk\" (UniqueName: \"kubernetes.io/projected/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-kube-api-access-z5dwk\") pod \"nova-scheduler-0\" (UID: \"8eae8ee3-3bd5-4463-b936-a237fbf26d2a\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.998644 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " pod="openstack/kube-state-metrics-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.998700 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8eae8ee3-3bd5-4463-b936-a237fbf26d2a\") " 
pod="openstack/nova-scheduler-0" Nov 28 16:35:19 crc kubenswrapper[4954]: I1128 16:35:19.998777 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-config-data\") pod \"nova-scheduler-0\" (UID: \"8eae8ee3-3bd5-4463-b936-a237fbf26d2a\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.002015 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.002354 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="ceilometer-central-agent" containerID="cri-o://38a3386eaf76a0ef28e0bc25f15e90345e3a051125b4898ffafed6dcbf86e981" gracePeriod=30 Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.007492 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="sg-core" containerID="cri-o://8e6ada87f371353aaa2d403fef939effc0cb40a0b79e100193235b21cefae6e4" gracePeriod=30 Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.007547 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="proxy-httpd" containerID="cri-o://28ca19b93b21234708bae450568d1c9f9e31b5ac55effc0128c40c7f2cb976be" gracePeriod=30 Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.007569 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="ceilometer-notification-agent" containerID="cri-o://c79523201917283c28abf14b4967c0d52c990e283b8ce254873811fca6ee5e33" gracePeriod=30 Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.008378 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " pod="openstack/kube-state-metrics-0" Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.008439 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-config-data\") pod \"nova-scheduler-0\" (UID: \"8eae8ee3-3bd5-4463-b936-a237fbf26d2a\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.008601 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " pod="openstack/kube-state-metrics-0" Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.008694 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " pod="openstack/kube-state-metrics-0" Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.028483 4954 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-ld8zk\" (UniqueName: \"kubernetes.io/projected/9c381eb3-f466-40b7-a962-6782db85678c-kube-api-access-ld8zk\") pod \"kube-state-metrics-0\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " pod="openstack/kube-state-metrics-0" Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.029773 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5dwk\" (UniqueName: \"kubernetes.io/projected/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-kube-api-access-z5dwk\") pod \"nova-scheduler-0\" (UID: \"8eae8ee3-3bd5-4463-b936-a237fbf26d2a\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.056370 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.163943 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8eae8ee3-3bd5-4463-b936-a237fbf26d2a\") " pod="openstack/nova-scheduler-0" Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.392739 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.595410 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.649789 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9c381eb3-f466-40b7-a962-6782db85678c","Type":"ContainerStarted","Data":"ea4e61ca56864d9de459baad0f4c81082f30c2ef2d1fe46df68e1ceeee287427"} Nov 28 16:35:20 crc kubenswrapper[4954]: I1128 16:35:20.841824 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:35:21 crc kubenswrapper[4954]: I1128 16:35:21.659983 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8eae8ee3-3bd5-4463-b936-a237fbf26d2a","Type":"ContainerStarted","Data":"dc83373818f1abdd0e1f401aaee8c5b1e34a1e198c9f5ee9a78a8054b0389526"} Nov 28 16:35:22 crc kubenswrapper[4954]: I1128 16:35:22.675611 4954 generic.go:334] "Generic (PLEG): container finished" podID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerID="28ca19b93b21234708bae450568d1c9f9e31b5ac55effc0128c40c7f2cb976be" exitCode=0 Nov 28 16:35:22 crc kubenswrapper[4954]: I1128 16:35:22.675927 4954 generic.go:334] "Generic (PLEG): container finished" podID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerID="8e6ada87f371353aaa2d403fef939effc0cb40a0b79e100193235b21cefae6e4" exitCode=2 Nov 28 16:35:22 crc kubenswrapper[4954]: I1128 16:35:22.675936 4954 generic.go:334] "Generic (PLEG): container finished" podID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerID="c79523201917283c28abf14b4967c0d52c990e283b8ce254873811fca6ee5e33" exitCode=0 Nov 28 16:35:22 crc kubenswrapper[4954]: I1128 16:35:22.675945 4954 generic.go:334] "Generic (PLEG): container finished" podID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerID="38a3386eaf76a0ef28e0bc25f15e90345e3a051125b4898ffafed6dcbf86e981" exitCode=0 Nov 28 16:35:22 crc kubenswrapper[4954]: I1128 16:35:22.675702 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"5a7176ea-3a4e-4259-9419-f4561f6d443f","Type":"ContainerDied","Data":"28ca19b93b21234708bae450568d1c9f9e31b5ac55effc0128c40c7f2cb976be"} Nov 28 16:35:22 crc kubenswrapper[4954]: I1128 16:35:22.676008 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a7176ea-3a4e-4259-9419-f4561f6d443f","Type":"ContainerDied","Data":"8e6ada87f371353aaa2d403fef939effc0cb40a0b79e100193235b21cefae6e4"} Nov 28 16:35:22 crc kubenswrapper[4954]: I1128 16:35:22.676026 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a7176ea-3a4e-4259-9419-f4561f6d443f","Type":"ContainerDied","Data":"c79523201917283c28abf14b4967c0d52c990e283b8ce254873811fca6ee5e33"} Nov 28 16:35:22 crc kubenswrapper[4954]: I1128 16:35:22.676039 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a7176ea-3a4e-4259-9419-f4561f6d443f","Type":"ContainerDied","Data":"38a3386eaf76a0ef28e0bc25f15e90345e3a051125b4898ffafed6dcbf86e981"} Nov 28 16:35:22 crc kubenswrapper[4954]: I1128 16:35:22.677805 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8eae8ee3-3bd5-4463-b936-a237fbf26d2a","Type":"ContainerStarted","Data":"58fc661e399a38dfc54e41dbaffdaef877ddb21d80fbfae544a83c1987cc0522"} Nov 28 16:35:22 crc kubenswrapper[4954]: I1128 16:35:22.699344 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.699326632 podStartE2EDuration="3.699326632s" podCreationTimestamp="2025-11-28 16:35:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:35:22.692637416 +0000 UTC m=+1476.084305967" watchObservedRunningTime="2025-11-28 16:35:22.699326632 +0000 UTC m=+1476.090995173" Nov 28 16:35:23 crc kubenswrapper[4954]: I1128 16:35:23.609928 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:35:23 crc kubenswrapper[4954]: I1128 16:35:23.672220 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-sg-core-conf-yaml\") pod \"5a7176ea-3a4e-4259-9419-f4561f6d443f\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " Nov 28 16:35:23 crc kubenswrapper[4954]: I1128 16:35:23.672300 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a7176ea-3a4e-4259-9419-f4561f6d443f-run-httpd\") pod \"5a7176ea-3a4e-4259-9419-f4561f6d443f\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " Nov 28 16:35:23 crc kubenswrapper[4954]: I1128 16:35:23.672473 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k8qfm\" (UniqueName: \"kubernetes.io/projected/5a7176ea-3a4e-4259-9419-f4561f6d443f-kube-api-access-k8qfm\") pod \"5a7176ea-3a4e-4259-9419-f4561f6d443f\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " Nov 28 16:35:23 crc kubenswrapper[4954]: I1128 16:35:23.672584 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-scripts\") pod \"5a7176ea-3a4e-4259-9419-f4561f6d443f\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " Nov 28 16:35:23 crc kubenswrapper[4954]: I1128 16:35:23.673907 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a7176ea-3a4e-4259-9419-f4561f6d443f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5a7176ea-3a4e-4259-9419-f4561f6d443f" (UID: "5a7176ea-3a4e-4259-9419-f4561f6d443f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:23 crc kubenswrapper[4954]: I1128 16:35:23.675557 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a7176ea-3a4e-4259-9419-f4561f6d443f-log-httpd\") pod \"5a7176ea-3a4e-4259-9419-f4561f6d443f\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " Nov 28 16:35:23 crc kubenswrapper[4954]: I1128 16:35:23.675722 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-config-data\") pod \"5a7176ea-3a4e-4259-9419-f4561f6d443f\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " Nov 28 16:35:23 crc kubenswrapper[4954]: I1128 16:35:23.675785 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-combined-ca-bundle\") pod \"5a7176ea-3a4e-4259-9419-f4561f6d443f\" (UID: \"5a7176ea-3a4e-4259-9419-f4561f6d443f\") " Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.676044 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a7176ea-3a4e-4259-9419-f4561f6d443f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5a7176ea-3a4e-4259-9419-f4561f6d443f" (UID: "5a7176ea-3a4e-4259-9419-f4561f6d443f"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.676991 4954 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a7176ea-3a4e-4259-9419-f4561f6d443f-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.677013 4954 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5a7176ea-3a4e-4259-9419-f4561f6d443f-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.680833 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a7176ea-3a4e-4259-9419-f4561f6d443f-kube-api-access-k8qfm" (OuterVolumeSpecName: "kube-api-access-k8qfm") pod "5a7176ea-3a4e-4259-9419-f4561f6d443f" (UID: "5a7176ea-3a4e-4259-9419-f4561f6d443f"). InnerVolumeSpecName "kube-api-access-k8qfm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.691233 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9c381eb3-f466-40b7-a962-6782db85678c","Type":"ContainerStarted","Data":"722b392e14e483b0e2f1d174ef2d136d04550f8d796a74666ddf9f8253b0b8ec"} Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.693231 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.710320 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.713200 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5a7176ea-3a4e-4259-9419-f4561f6d443f","Type":"ContainerDied","Data":"eead86bc128f26ddd9d6ef2a72b6487ca9fdaa5f6733d49d9dc2010935076ec8"} Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.713261 4954 scope.go:117] "RemoveContainer" containerID="28ca19b93b21234708bae450568d1c9f9e31b5ac55effc0128c40c7f2cb976be" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.713370 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-scripts" (OuterVolumeSpecName: "scripts") pod "5a7176ea-3a4e-4259-9419-f4561f6d443f" (UID: "5a7176ea-3a4e-4259-9419-f4561f6d443f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.718734 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5a7176ea-3a4e-4259-9419-f4561f6d443f" (UID: "5a7176ea-3a4e-4259-9419-f4561f6d443f"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.721082 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.45221518 podStartE2EDuration="4.721064336s" podCreationTimestamp="2025-11-28 16:35:19 +0000 UTC" firstStartedPulling="2025-11-28 16:35:20.602224844 +0000 UTC m=+1473.993893385" lastFinishedPulling="2025-11-28 16:35:22.871074 +0000 UTC m=+1476.262742541" observedRunningTime="2025-11-28 16:35:23.715629031 +0000 UTC m=+1477.107297582" watchObservedRunningTime="2025-11-28 16:35:23.721064336 +0000 UTC m=+1477.112732877" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.779076 4954 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.779106 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k8qfm\" (UniqueName: \"kubernetes.io/projected/5a7176ea-3a4e-4259-9419-f4561f6d443f-kube-api-access-k8qfm\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.779116 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.788811 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5a7176ea-3a4e-4259-9419-f4561f6d443f" (UID: "5a7176ea-3a4e-4259-9419-f4561f6d443f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.795592 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-config-data" (OuterVolumeSpecName: "config-data") pod "5a7176ea-3a4e-4259-9419-f4561f6d443f" (UID: "5a7176ea-3a4e-4259-9419-f4561f6d443f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.838059 4954 scope.go:117] "RemoveContainer" containerID="8e6ada87f371353aaa2d403fef939effc0cb40a0b79e100193235b21cefae6e4" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.857278 4954 scope.go:117] "RemoveContainer" containerID="c79523201917283c28abf14b4967c0d52c990e283b8ce254873811fca6ee5e33" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.876293 4954 scope.go:117] "RemoveContainer" containerID="38a3386eaf76a0ef28e0bc25f15e90345e3a051125b4898ffafed6dcbf86e981" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.880580 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:23.880604 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a7176ea-3a4e-4259-9419-f4561f6d443f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.055849 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.065844 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.089293 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:24 crc kubenswrapper[4954]: E1128 16:35:24.089911 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="proxy-httpd" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.089927 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="proxy-httpd" Nov 28 16:35:24 crc kubenswrapper[4954]: E1128 16:35:24.089945 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="ceilometer-notification-agent" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.089954 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="ceilometer-notification-agent" Nov 28 16:35:24 crc kubenswrapper[4954]: E1128 16:35:24.089969 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="ceilometer-central-agent" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.089979 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="ceilometer-central-agent" Nov 28 16:35:24 crc kubenswrapper[4954]: E1128 16:35:24.090004 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="sg-core" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.090012 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="sg-core" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.090239 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="ceilometer-notification-agent" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.090266 4954 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="sg-core" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.090283 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="proxy-httpd" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.090301 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" containerName="ceilometer-central-agent" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.092606 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.097605 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.097804 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.098112 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.103748 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.186079 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-config-data\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.186206 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.186282 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8pqz\" (UniqueName: \"kubernetes.io/projected/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-kube-api-access-b8pqz\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.186347 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-scripts\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.186398 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.186574 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-log-httpd\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 
16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.186644 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.186683 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-run-httpd\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.288701 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-config-data\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.288826 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.288871 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8pqz\" (UniqueName: \"kubernetes.io/projected/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-kube-api-access-b8pqz\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.288909 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-scripts\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.288953 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.288982 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-log-httpd\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.289009 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.289032 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-run-httpd\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") 
" pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.291967 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-log-httpd\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.291977 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-run-httpd\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.292714 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-config-data\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.293993 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.294031 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-scripts\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.294668 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.305203 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.307781 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8pqz\" (UniqueName: \"kubernetes.io/projected/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-kube-api-access-b8pqz\") pod \"ceilometer-0\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.417974 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:35:24 crc kubenswrapper[4954]: I1128 16:35:24.985818 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:25 crc kubenswrapper[4954]: I1128 16:35:25.393117 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 16:35:25 crc kubenswrapper[4954]: I1128 16:35:25.737079 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b880f39f-4ae7-4798-9f13-5d9da7eaa94b","Type":"ContainerStarted","Data":"6ed064c7b701c58ab24a4ad1ed862f79a3769a5330ef32fac44d740baa155abc"} Nov 28 16:35:25 crc kubenswrapper[4954]: I1128 16:35:25.868166 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a7176ea-3a4e-4259-9419-f4561f6d443f" path="/var/lib/kubelet/pods/5a7176ea-3a4e-4259-9419-f4561f6d443f/volumes" Nov 28 16:35:25 crc kubenswrapper[4954]: I1128 16:35:25.952336 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 16:35:25 crc kubenswrapper[4954]: I1128 16:35:25.957049 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 16:35:25 crc kubenswrapper[4954]: I1128 16:35:25.959984 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 16:35:26 crc kubenswrapper[4954]: I1128 16:35:26.750512 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b880f39f-4ae7-4798-9f13-5d9da7eaa94b","Type":"ContainerStarted","Data":"81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81"} Nov 28 16:35:26 crc kubenswrapper[4954]: I1128 16:35:26.758039 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 16:35:27 crc kubenswrapper[4954]: I1128 16:35:27.847445 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 16:35:27 crc kubenswrapper[4954]: I1128 16:35:27.848223 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 16:35:27 crc kubenswrapper[4954]: I1128 16:35:27.849578 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 16:35:27 crc kubenswrapper[4954]: I1128 16:35:27.851835 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 16:35:28 crc kubenswrapper[4954]: I1128 16:35:28.772259 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b880f39f-4ae7-4798-9f13-5d9da7eaa94b","Type":"ContainerStarted","Data":"68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561"} Nov 28 16:35:28 crc kubenswrapper[4954]: I1128 16:35:28.772813 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 16:35:28 crc kubenswrapper[4954]: I1128 16:35:28.775896 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 16:35:28 crc kubenswrapper[4954]: I1128 16:35:28.967338 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-dr7bj"] Nov 28 16:35:28 crc kubenswrapper[4954]: I1128 16:35:28.969353 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.004607 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-dr7bj"] Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.090427 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.090598 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.090632 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhx6x\" (UniqueName: \"kubernetes.io/projected/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-kube-api-access-dhx6x\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.090665 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.090693 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-config\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.090719 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.192163 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.192229 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhx6x\" (UniqueName: \"kubernetes.io/projected/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-kube-api-access-dhx6x\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.192294 4954 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.192341 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-config\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.192378 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.192445 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.193348 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-config\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.193369 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.193478 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.193645 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.193727 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.216299 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhx6x\" (UniqueName: 
\"kubernetes.io/projected/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-kube-api-access-dhx6x\") pod \"dnsmasq-dns-cd5cbd7b9-dr7bj\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.300058 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:29 crc kubenswrapper[4954]: I1128 16:35:29.795709 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-dr7bj"] Nov 28 16:35:30 crc kubenswrapper[4954]: I1128 16:35:30.068992 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 28 16:35:30 crc kubenswrapper[4954]: I1128 16:35:30.393806 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 16:35:30 crc kubenswrapper[4954]: I1128 16:35:30.433073 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 16:35:30 crc kubenswrapper[4954]: I1128 16:35:30.790218 4954 generic.go:334] "Generic (PLEG): container finished" podID="52dbe85a-ccd3-4527-af8d-17ad9748d3c4" containerID="a8575b0b135c0db7c127a5b6990ea8e36cac76374ed530e9480e0950abe2fca1" exitCode=0 Nov 28 16:35:30 crc kubenswrapper[4954]: I1128 16:35:30.790284 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" event={"ID":"52dbe85a-ccd3-4527-af8d-17ad9748d3c4","Type":"ContainerDied","Data":"a8575b0b135c0db7c127a5b6990ea8e36cac76374ed530e9480e0950abe2fca1"} Nov 28 16:35:30 crc kubenswrapper[4954]: I1128 16:35:30.790315 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" event={"ID":"52dbe85a-ccd3-4527-af8d-17ad9748d3c4","Type":"ContainerStarted","Data":"0659b27ad1225d8e02c463bc35598a923f0eb41e7a5417f13901801c5ec87d01"} Nov 28 16:35:30 crc kubenswrapper[4954]: I1128 16:35:30.793640 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b880f39f-4ae7-4798-9f13-5d9da7eaa94b","Type":"ContainerStarted","Data":"2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607"} Nov 28 16:35:30 crc kubenswrapper[4954]: I1128 16:35:30.827991 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 16:35:31 crc kubenswrapper[4954]: I1128 16:35:31.803291 4954 generic.go:334] "Generic (PLEG): container finished" podID="889868e6-7e39-4413-9193-4b222c14efbd" containerID="a3125dcd69b8118e16e5518e30671dc7efc580d681489be8f966dc24e6e1ba69" exitCode=137 Nov 28 16:35:31 crc kubenswrapper[4954]: I1128 16:35:31.803916 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"889868e6-7e39-4413-9193-4b222c14efbd","Type":"ContainerDied","Data":"a3125dcd69b8118e16e5518e30671dc7efc580d681489be8f966dc24e6e1ba69"} Nov 28 16:35:31 crc kubenswrapper[4954]: I1128 16:35:31.806305 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" event={"ID":"52dbe85a-ccd3-4527-af8d-17ad9748d3c4","Type":"ContainerStarted","Data":"346a713c806456b0618fdd74822beca4c32ebb91c0170c66b810d1f4091e8caf"} Nov 28 16:35:31 crc kubenswrapper[4954]: I1128 16:35:31.834013 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" podStartSLOduration=3.833996553 
podStartE2EDuration="3.833996553s" podCreationTimestamp="2025-11-28 16:35:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:35:31.82554855 +0000 UTC m=+1485.217217091" watchObservedRunningTime="2025-11-28 16:35:31.833996553 +0000 UTC m=+1485.225665094" Nov 28 16:35:31 crc kubenswrapper[4954]: I1128 16:35:31.906504 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.076459 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.076734 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b3314fcc-d027-4e35-8739-b7f590c0cfb9" containerName="nova-api-log" containerID="cri-o://4205f08df3d9e3e41f04d1d7b5eb3f1b08f78f9afea057c569d969d88dd59254" gracePeriod=30 Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.077228 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b3314fcc-d027-4e35-8739-b7f590c0cfb9" containerName="nova-api-api" containerID="cri-o://541d577d54af348454af761b14e45c18f84c95b0a7a06bc84d9d4ef605761925" gracePeriod=30 Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.476863 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.571742 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889868e6-7e39-4413-9193-4b222c14efbd-combined-ca-bundle\") pod \"889868e6-7e39-4413-9193-4b222c14efbd\" (UID: \"889868e6-7e39-4413-9193-4b222c14efbd\") " Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.571956 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889868e6-7e39-4413-9193-4b222c14efbd-config-data\") pod \"889868e6-7e39-4413-9193-4b222c14efbd\" (UID: \"889868e6-7e39-4413-9193-4b222c14efbd\") " Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.571983 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lc7fl\" (UniqueName: \"kubernetes.io/projected/889868e6-7e39-4413-9193-4b222c14efbd-kube-api-access-lc7fl\") pod \"889868e6-7e39-4413-9193-4b222c14efbd\" (UID: \"889868e6-7e39-4413-9193-4b222c14efbd\") " Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.578700 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/889868e6-7e39-4413-9193-4b222c14efbd-kube-api-access-lc7fl" (OuterVolumeSpecName: "kube-api-access-lc7fl") pod "889868e6-7e39-4413-9193-4b222c14efbd" (UID: "889868e6-7e39-4413-9193-4b222c14efbd"). InnerVolumeSpecName "kube-api-access-lc7fl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.617739 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/889868e6-7e39-4413-9193-4b222c14efbd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "889868e6-7e39-4413-9193-4b222c14efbd" (UID: "889868e6-7e39-4413-9193-4b222c14efbd"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.636090 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/889868e6-7e39-4413-9193-4b222c14efbd-config-data" (OuterVolumeSpecName: "config-data") pod "889868e6-7e39-4413-9193-4b222c14efbd" (UID: "889868e6-7e39-4413-9193-4b222c14efbd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.674733 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/889868e6-7e39-4413-9193-4b222c14efbd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.674765 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/889868e6-7e39-4413-9193-4b222c14efbd-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.674776 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lc7fl\" (UniqueName: \"kubernetes.io/projected/889868e6-7e39-4413-9193-4b222c14efbd-kube-api-access-lc7fl\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.820118 4954 generic.go:334] "Generic (PLEG): container finished" podID="b3314fcc-d027-4e35-8739-b7f590c0cfb9" containerID="4205f08df3d9e3e41f04d1d7b5eb3f1b08f78f9afea057c569d969d88dd59254" exitCode=143 Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.820173 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b3314fcc-d027-4e35-8739-b7f590c0cfb9","Type":"ContainerDied","Data":"4205f08df3d9e3e41f04d1d7b5eb3f1b08f78f9afea057c569d969d88dd59254"} Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.822234 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.822590 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"889868e6-7e39-4413-9193-4b222c14efbd","Type":"ContainerDied","Data":"d5c65cb3d48133ad6835834df891ea27dac46147293d13afbd6bd51c0e694aeb"} Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.822624 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.822644 4954 scope.go:117] "RemoveContainer" containerID="a3125dcd69b8118e16e5518e30671dc7efc580d681489be8f966dc24e6e1ba69" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.909701 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.920046 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.945757 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:35:32 crc kubenswrapper[4954]: E1128 16:35:32.946252 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="889868e6-7e39-4413-9193-4b222c14efbd" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.946269 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="889868e6-7e39-4413-9193-4b222c14efbd" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.946491 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="889868e6-7e39-4413-9193-4b222c14efbd" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.947361 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.950031 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.950559 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.950656 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 28 16:35:32 crc kubenswrapper[4954]: I1128 16:35:32.955381 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.094711 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.095145 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.095273 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9559p\" (UniqueName: \"kubernetes.io/projected/cf74e9b3-c272-47a3-bd81-1fae19e39236-kube-api-access-9559p\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.095317 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.095410 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.196782 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.197214 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9559p\" (UniqueName: \"kubernetes.io/projected/cf74e9b3-c272-47a3-bd81-1fae19e39236-kube-api-access-9559p\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " pod="openstack/nova-cell1-novncproxy-0" Nov 
28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.197299 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.197454 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.197600 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.202121 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.206222 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.206904 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.207091 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.218588 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9559p\" (UniqueName: \"kubernetes.io/projected/cf74e9b3-c272-47a3-bd81-1fae19e39236-kube-api-access-9559p\") pod \"nova-cell1-novncproxy-0\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.273028 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.832052 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.848476 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b880f39f-4ae7-4798-9f13-5d9da7eaa94b","Type":"ContainerStarted","Data":"75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036"} Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.849015 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="ceilometer-central-agent" containerID="cri-o://81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81" gracePeriod=30 Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.849077 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.849105 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="proxy-httpd" containerID="cri-o://75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036" gracePeriod=30 Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.849151 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="sg-core" containerID="cri-o://2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607" gracePeriod=30 Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.849197 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="ceilometer-notification-agent" containerID="cri-o://68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561" gracePeriod=30 Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.857802 4954 generic.go:334] "Generic (PLEG): container finished" podID="e15090c7-7e49-4a9e-bad2-65482065e048" containerID="8c6a6aea7a9dab1c90db663a53190b3d385f2ed88fd26fba749f3549c251570f" exitCode=0 Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.869648 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="889868e6-7e39-4413-9193-4b222c14efbd" path="/var/lib/kubelet/pods/889868e6-7e39-4413-9193-4b222c14efbd/volumes" Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.870276 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-pbz9g" event={"ID":"e15090c7-7e49-4a9e-bad2-65482065e048","Type":"ContainerDied","Data":"8c6a6aea7a9dab1c90db663a53190b3d385f2ed88fd26fba749f3549c251570f"} Nov 28 16:35:33 crc kubenswrapper[4954]: I1128 16:35:33.903508 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.875011977 podStartE2EDuration="9.90348615s" podCreationTimestamp="2025-11-28 16:35:24 +0000 UTC" firstStartedPulling="2025-11-28 16:35:24.995271591 +0000 UTC m=+1478.386940142" lastFinishedPulling="2025-11-28 16:35:33.023745774 +0000 UTC m=+1486.415414315" observedRunningTime="2025-11-28 16:35:33.88023872 +0000 UTC m=+1487.271907261" watchObservedRunningTime="2025-11-28 16:35:33.90348615 +0000 UTC m=+1487.295154691" Nov 28 16:35:34 crc kubenswrapper[4954]: 
I1128 16:35:34.647744 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.729776 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-scripts\") pod \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.729889 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-sg-core-conf-yaml\") pod \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.729938 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b8pqz\" (UniqueName: \"kubernetes.io/projected/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-kube-api-access-b8pqz\") pod \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.729996 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-log-httpd\") pod \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.730030 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-run-httpd\") pod \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.730076 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-ceilometer-tls-certs\") pod \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.730143 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-combined-ca-bundle\") pod \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.730217 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-config-data\") pod \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\" (UID: \"b880f39f-4ae7-4798-9f13-5d9da7eaa94b\") " Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.730518 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b880f39f-4ae7-4798-9f13-5d9da7eaa94b" (UID: "b880f39f-4ae7-4798-9f13-5d9da7eaa94b"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.730752 4954 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.730817 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b880f39f-4ae7-4798-9f13-5d9da7eaa94b" (UID: "b880f39f-4ae7-4798-9f13-5d9da7eaa94b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.735642 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-scripts" (OuterVolumeSpecName: "scripts") pod "b880f39f-4ae7-4798-9f13-5d9da7eaa94b" (UID: "b880f39f-4ae7-4798-9f13-5d9da7eaa94b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.736161 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-kube-api-access-b8pqz" (OuterVolumeSpecName: "kube-api-access-b8pqz") pod "b880f39f-4ae7-4798-9f13-5d9da7eaa94b" (UID: "b880f39f-4ae7-4798-9f13-5d9da7eaa94b"). InnerVolumeSpecName "kube-api-access-b8pqz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.768930 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b880f39f-4ae7-4798-9f13-5d9da7eaa94b" (UID: "b880f39f-4ae7-4798-9f13-5d9da7eaa94b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.799339 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "b880f39f-4ae7-4798-9f13-5d9da7eaa94b" (UID: "b880f39f-4ae7-4798-9f13-5d9da7eaa94b"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.817252 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b880f39f-4ae7-4798-9f13-5d9da7eaa94b" (UID: "b880f39f-4ae7-4798-9f13-5d9da7eaa94b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.832830 4954 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.833792 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.833886 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.834025 4954 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.834126 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b8pqz\" (UniqueName: \"kubernetes.io/projected/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-kube-api-access-b8pqz\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.834253 4954 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.842366 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-config-data" (OuterVolumeSpecName: "config-data") pod "b880f39f-4ae7-4798-9f13-5d9da7eaa94b" (UID: "b880f39f-4ae7-4798-9f13-5d9da7eaa94b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.871046 4954 generic.go:334] "Generic (PLEG): container finished" podID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerID="75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036" exitCode=0 Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.871074 4954 generic.go:334] "Generic (PLEG): container finished" podID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerID="2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607" exitCode=2 Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.871081 4954 generic.go:334] "Generic (PLEG): container finished" podID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerID="68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561" exitCode=0 Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.871088 4954 generic.go:334] "Generic (PLEG): container finished" podID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerID="81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81" exitCode=0 Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.871134 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b880f39f-4ae7-4798-9f13-5d9da7eaa94b","Type":"ContainerDied","Data":"75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036"} Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.871135 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.871162 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b880f39f-4ae7-4798-9f13-5d9da7eaa94b","Type":"ContainerDied","Data":"2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607"} Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.871173 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b880f39f-4ae7-4798-9f13-5d9da7eaa94b","Type":"ContainerDied","Data":"68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561"} Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.871184 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b880f39f-4ae7-4798-9f13-5d9da7eaa94b","Type":"ContainerDied","Data":"81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81"} Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.871192 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b880f39f-4ae7-4798-9f13-5d9da7eaa94b","Type":"ContainerDied","Data":"6ed064c7b701c58ab24a4ad1ed862f79a3769a5330ef32fac44d740baa155abc"} Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.871206 4954 scope.go:117] "RemoveContainer" containerID="75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.874014 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"cf74e9b3-c272-47a3-bd81-1fae19e39236","Type":"ContainerStarted","Data":"029089c638fd3d3d0c67c5be6319770aaa300384c28cd15befea0e4ab7c84c99"} Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.874049 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"cf74e9b3-c272-47a3-bd81-1fae19e39236","Type":"ContainerStarted","Data":"d0df4df86517cbd59da3f0bb803cc7c70ed2d25f268c0322a9b958411a1d31b3"} Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.920723 4954 scope.go:117] "RemoveContainer" containerID="2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.925149 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.925125861 podStartE2EDuration="2.925125861s" podCreationTimestamp="2025-11-28 16:35:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:35:34.901250161 +0000 UTC m=+1488.292918712" watchObservedRunningTime="2025-11-28 16:35:34.925125861 +0000 UTC m=+1488.316794402" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.938155 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b880f39f-4ae7-4798-9f13-5d9da7eaa94b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.966732 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:34 crc kubenswrapper[4954]: I1128 16:35:34.990128 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.005818 4954 scope.go:117] "RemoveContainer" containerID="68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561" Nov 28 16:35:35 crc 
kubenswrapper[4954]: I1128 16:35:35.012454 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:35 crc kubenswrapper[4954]: E1128 16:35:35.012858 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="proxy-httpd" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.012870 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="proxy-httpd" Nov 28 16:35:35 crc kubenswrapper[4954]: E1128 16:35:35.012886 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="sg-core" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.012892 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="sg-core" Nov 28 16:35:35 crc kubenswrapper[4954]: E1128 16:35:35.012941 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="ceilometer-notification-agent" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.012947 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="ceilometer-notification-agent" Nov 28 16:35:35 crc kubenswrapper[4954]: E1128 16:35:35.012973 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="ceilometer-central-agent" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.012979 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="ceilometer-central-agent" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.013611 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="ceilometer-notification-agent" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.013628 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="sg-core" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.013642 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="proxy-httpd" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.013662 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" containerName="ceilometer-central-agent" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.015332 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.020061 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.020352 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.020282 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.026223 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.067673 4954 scope.go:117] "RemoveContainer" containerID="81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.090211 4954 scope.go:117] "RemoveContainer" containerID="75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036" Nov 28 16:35:35 crc kubenswrapper[4954]: E1128 16:35:35.090750 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036\": container with ID starting with 75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036 not found: ID does not exist" containerID="75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.090781 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036"} err="failed to get container status \"75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036\": rpc error: code = NotFound desc = could not find container \"75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036\": container with ID starting with 75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.090808 4954 scope.go:117] "RemoveContainer" containerID="2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607" Nov 28 16:35:35 crc kubenswrapper[4954]: E1128 16:35:35.092000 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607\": container with ID starting with 2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607 not found: ID does not exist" containerID="2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.092028 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607"} err="failed to get container status \"2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607\": rpc error: code = NotFound desc = could not find container \"2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607\": container with ID starting with 2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.092049 4954 scope.go:117] "RemoveContainer" containerID="68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561" Nov 28 16:35:35 
crc kubenswrapper[4954]: E1128 16:35:35.092714 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561\": container with ID starting with 68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561 not found: ID does not exist" containerID="68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.092740 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561"} err="failed to get container status \"68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561\": rpc error: code = NotFound desc = could not find container \"68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561\": container with ID starting with 68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.092754 4954 scope.go:117] "RemoveContainer" containerID="81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81" Nov 28 16:35:35 crc kubenswrapper[4954]: E1128 16:35:35.093141 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81\": container with ID starting with 81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81 not found: ID does not exist" containerID="81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.093198 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81"} err="failed to get container status \"81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81\": rpc error: code = NotFound desc = could not find container \"81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81\": container with ID starting with 81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.093227 4954 scope.go:117] "RemoveContainer" containerID="75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.093652 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036"} err="failed to get container status \"75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036\": rpc error: code = NotFound desc = could not find container \"75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036\": container with ID starting with 75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.093676 4954 scope.go:117] "RemoveContainer" containerID="2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.093980 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607"} err="failed to get container status 
\"2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607\": rpc error: code = NotFound desc = could not find container \"2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607\": container with ID starting with 2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.094005 4954 scope.go:117] "RemoveContainer" containerID="68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.094324 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561"} err="failed to get container status \"68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561\": rpc error: code = NotFound desc = could not find container \"68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561\": container with ID starting with 68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.094352 4954 scope.go:117] "RemoveContainer" containerID="81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.094688 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81"} err="failed to get container status \"81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81\": rpc error: code = NotFound desc = could not find container \"81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81\": container with ID starting with 81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.094715 4954 scope.go:117] "RemoveContainer" containerID="75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.094974 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036"} err="failed to get container status \"75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036\": rpc error: code = NotFound desc = could not find container \"75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036\": container with ID starting with 75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.094998 4954 scope.go:117] "RemoveContainer" containerID="2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.095209 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607"} err="failed to get container status \"2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607\": rpc error: code = NotFound desc = could not find container \"2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607\": container with ID starting with 2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.095233 4954 scope.go:117] "RemoveContainer" 
containerID="68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.095449 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561"} err="failed to get container status \"68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561\": rpc error: code = NotFound desc = could not find container \"68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561\": container with ID starting with 68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.095471 4954 scope.go:117] "RemoveContainer" containerID="81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.095676 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81"} err="failed to get container status \"81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81\": rpc error: code = NotFound desc = could not find container \"81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81\": container with ID starting with 81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.095702 4954 scope.go:117] "RemoveContainer" containerID="75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.095964 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036"} err="failed to get container status \"75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036\": rpc error: code = NotFound desc = could not find container \"75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036\": container with ID starting with 75b0892bbd9ff006e8962c64945dfb93966efbf927881316f7f525468185d036 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.095993 4954 scope.go:117] "RemoveContainer" containerID="2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.096828 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607"} err="failed to get container status \"2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607\": rpc error: code = NotFound desc = could not find container \"2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607\": container with ID starting with 2d655e35583259c70308d1363b7539ac874292f44b72ace49256a8af2a6cb607 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.096865 4954 scope.go:117] "RemoveContainer" containerID="68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.097045 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561"} err="failed to get container status \"68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561\": rpc error: code = NotFound desc = could not find 
container \"68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561\": container with ID starting with 68607edacaf00429ca4b15f7dff8e82325a10a30a7e1d032d8b3ddf2b1444561 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.097063 4954 scope.go:117] "RemoveContainer" containerID="81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.097465 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81"} err="failed to get container status \"81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81\": rpc error: code = NotFound desc = could not find container \"81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81\": container with ID starting with 81940f3468c67cea01de0ba02b307d94ff1972a0a2fc1ab24eb26da03fe54c81 not found: ID does not exist" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.142658 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s67r7\" (UniqueName: \"kubernetes.io/projected/7cef279d-a444-456d-8add-1998974a6e08-kube-api-access-s67r7\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.142735 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-config-data\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.142826 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-scripts\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.142847 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7cef279d-a444-456d-8add-1998974a6e08-log-httpd\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.142875 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.142908 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.142984 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: 
\"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.143017 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7cef279d-a444-456d-8add-1998974a6e08-run-httpd\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.245777 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-config-data\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.245828 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-scripts\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.245864 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7cef279d-a444-456d-8add-1998974a6e08-log-httpd\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.245889 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.246611 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7cef279d-a444-456d-8add-1998974a6e08-log-httpd\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.247881 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.248035 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.248087 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7cef279d-a444-456d-8add-1998974a6e08-run-httpd\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.248209 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s67r7\" (UniqueName: \"kubernetes.io/projected/7cef279d-a444-456d-8add-1998974a6e08-kube-api-access-s67r7\") pod \"ceilometer-0\" (UID: 
\"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.248436 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7cef279d-a444-456d-8add-1998974a6e08-run-httpd\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.251102 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.251200 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.251380 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.251389 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-scripts\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.253706 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-config-data\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.259248 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.268620 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s67r7\" (UniqueName: \"kubernetes.io/projected/7cef279d-a444-456d-8add-1998974a6e08-kube-api-access-s67r7\") pod \"ceilometer-0\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.348957 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-scripts\") pod \"e15090c7-7e49-4a9e-bad2-65482065e048\" (UID: \"e15090c7-7e49-4a9e-bad2-65482065e048\") " Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.349261 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-config-data\") pod \"e15090c7-7e49-4a9e-bad2-65482065e048\" (UID: \"e15090c7-7e49-4a9e-bad2-65482065e048\") " Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.349346 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2zzxm\" (UniqueName: \"kubernetes.io/projected/e15090c7-7e49-4a9e-bad2-65482065e048-kube-api-access-2zzxm\") pod \"e15090c7-7e49-4a9e-bad2-65482065e048\" (UID: \"e15090c7-7e49-4a9e-bad2-65482065e048\") " Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.349518 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-combined-ca-bundle\") pod \"e15090c7-7e49-4a9e-bad2-65482065e048\" (UID: \"e15090c7-7e49-4a9e-bad2-65482065e048\") " Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.350142 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.353466 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e15090c7-7e49-4a9e-bad2-65482065e048-kube-api-access-2zzxm" (OuterVolumeSpecName: "kube-api-access-2zzxm") pod "e15090c7-7e49-4a9e-bad2-65482065e048" (UID: "e15090c7-7e49-4a9e-bad2-65482065e048"). InnerVolumeSpecName "kube-api-access-2zzxm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.353578 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-scripts" (OuterVolumeSpecName: "scripts") pod "e15090c7-7e49-4a9e-bad2-65482065e048" (UID: "e15090c7-7e49-4a9e-bad2-65482065e048"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.382313 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e15090c7-7e49-4a9e-bad2-65482065e048" (UID: "e15090c7-7e49-4a9e-bad2-65482065e048"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.385113 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-config-data" (OuterVolumeSpecName: "config-data") pod "e15090c7-7e49-4a9e-bad2-65482065e048" (UID: "e15090c7-7e49-4a9e-bad2-65482065e048"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.452277 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.452559 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.452576 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2zzxm\" (UniqueName: \"kubernetes.io/projected/e15090c7-7e49-4a9e-bad2-65482065e048-kube-api-access-2zzxm\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.452590 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e15090c7-7e49-4a9e-bad2-65482065e048-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.805642 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.866591 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b880f39f-4ae7-4798-9f13-5d9da7eaa94b" path="/var/lib/kubelet/pods/b880f39f-4ae7-4798-9f13-5d9da7eaa94b/volumes" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.901081 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-pbz9g" event={"ID":"e15090c7-7e49-4a9e-bad2-65482065e048","Type":"ContainerDied","Data":"ed00f68d5a20ee754e6f2b7c197bc0ab387fe579e99d73c678e80a9f7c388ac9"} Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.901285 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ed00f68d5a20ee754e6f2b7c197bc0ab387fe579e99d73c678e80a9f7c388ac9" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.901383 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-pbz9g" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.907924 4954 generic.go:334] "Generic (PLEG): container finished" podID="b3314fcc-d027-4e35-8739-b7f590c0cfb9" containerID="541d577d54af348454af761b14e45c18f84c95b0a7a06bc84d9d4ef605761925" exitCode=0 Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.908157 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b3314fcc-d027-4e35-8739-b7f590c0cfb9","Type":"ContainerDied","Data":"541d577d54af348454af761b14e45c18f84c95b0a7a06bc84d9d4ef605761925"} Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.912158 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7cef279d-a444-456d-8add-1998974a6e08","Type":"ContainerStarted","Data":"6f42dbbb98e2b3df9dc95cc8c60bf46034efa15d17bb2c64b7a730ed3ce33760"} Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.991128 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:35:35 crc kubenswrapper[4954]: E1128 16:35:35.991563 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e15090c7-7e49-4a9e-bad2-65482065e048" containerName="nova-cell1-conductor-db-sync" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.991580 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="e15090c7-7e49-4a9e-bad2-65482065e048" containerName="nova-cell1-conductor-db-sync" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.991780 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="e15090c7-7e49-4a9e-bad2-65482065e048" containerName="nova-cell1-conductor-db-sync" Nov 28 16:35:35 crc kubenswrapper[4954]: I1128 16:35:35.992559 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.002315 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.005637 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.166017 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"43af6ac7-70c5-43ff-84a0-0f6b6159ae66\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.166377 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkbdx\" (UniqueName: \"kubernetes.io/projected/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-kube-api-access-mkbdx\") pod \"nova-cell1-conductor-0\" (UID: \"43af6ac7-70c5-43ff-84a0-0f6b6159ae66\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.166485 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"43af6ac7-70c5-43ff-84a0-0f6b6159ae66\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.269139 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"43af6ac7-70c5-43ff-84a0-0f6b6159ae66\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.269646 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkbdx\" (UniqueName: \"kubernetes.io/projected/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-kube-api-access-mkbdx\") pod \"nova-cell1-conductor-0\" (UID: \"43af6ac7-70c5-43ff-84a0-0f6b6159ae66\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.269706 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"43af6ac7-70c5-43ff-84a0-0f6b6159ae66\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.275764 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"43af6ac7-70c5-43ff-84a0-0f6b6159ae66\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.275848 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"43af6ac7-70c5-43ff-84a0-0f6b6159ae66\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.295230 4954 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkbdx\" (UniqueName: \"kubernetes.io/projected/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-kube-api-access-mkbdx\") pod \"nova-cell1-conductor-0\" (UID: \"43af6ac7-70c5-43ff-84a0-0f6b6159ae66\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.309423 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.414932 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.576302 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3314fcc-d027-4e35-8739-b7f590c0cfb9-logs\") pod \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.576864 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3314fcc-d027-4e35-8739-b7f590c0cfb9-config-data\") pod \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.576891 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3314fcc-d027-4e35-8739-b7f590c0cfb9-combined-ca-bundle\") pod \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.576907 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3314fcc-d027-4e35-8739-b7f590c0cfb9-logs" (OuterVolumeSpecName: "logs") pod "b3314fcc-d027-4e35-8739-b7f590c0cfb9" (UID: "b3314fcc-d027-4e35-8739-b7f590c0cfb9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.576947 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jv4w\" (UniqueName: \"kubernetes.io/projected/b3314fcc-d027-4e35-8739-b7f590c0cfb9-kube-api-access-6jv4w\") pod \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\" (UID: \"b3314fcc-d027-4e35-8739-b7f590c0cfb9\") " Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.577721 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3314fcc-d027-4e35-8739-b7f590c0cfb9-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.597198 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3314fcc-d027-4e35-8739-b7f590c0cfb9-kube-api-access-6jv4w" (OuterVolumeSpecName: "kube-api-access-6jv4w") pod "b3314fcc-d027-4e35-8739-b7f590c0cfb9" (UID: "b3314fcc-d027-4e35-8739-b7f590c0cfb9"). InnerVolumeSpecName "kube-api-access-6jv4w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.608970 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3314fcc-d027-4e35-8739-b7f590c0cfb9-config-data" (OuterVolumeSpecName: "config-data") pod "b3314fcc-d027-4e35-8739-b7f590c0cfb9" (UID: "b3314fcc-d027-4e35-8739-b7f590c0cfb9"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.610092 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3314fcc-d027-4e35-8739-b7f590c0cfb9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b3314fcc-d027-4e35-8739-b7f590c0cfb9" (UID: "b3314fcc-d027-4e35-8739-b7f590c0cfb9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.679174 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3314fcc-d027-4e35-8739-b7f590c0cfb9-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.679209 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3314fcc-d027-4e35-8739-b7f590c0cfb9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.679219 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jv4w\" (UniqueName: \"kubernetes.io/projected/b3314fcc-d027-4e35-8739-b7f590c0cfb9-kube-api-access-6jv4w\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.890397 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:35:36 crc kubenswrapper[4954]: W1128 16:35:36.981649 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43af6ac7_70c5_43ff_84a0_0f6b6159ae66.slice/crio-5fcfd5dc09dbb15a96c3f82dc25914b5ddddbb38994aec53a9e84ffb97651a0b WatchSource:0}: Error finding container 5fcfd5dc09dbb15a96c3f82dc25914b5ddddbb38994aec53a9e84ffb97651a0b: Status 404 returned error can't find the container with id 5fcfd5dc09dbb15a96c3f82dc25914b5ddddbb38994aec53a9e84ffb97651a0b Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.992172 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b3314fcc-d027-4e35-8739-b7f590c0cfb9","Type":"ContainerDied","Data":"5a0ae0b4e7116cc59c4f09f6de952d0d914401bcbed00b3ba2189c326666c7e4"} Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.992230 4954 scope.go:117] "RemoveContainer" containerID="541d577d54af348454af761b14e45c18f84c95b0a7a06bc84d9d4ef605761925" Nov 28 16:35:36 crc kubenswrapper[4954]: I1128 16:35:36.992435 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.035125 4954 scope.go:117] "RemoveContainer" containerID="4205f08df3d9e3e41f04d1d7b5eb3f1b08f78f9afea057c569d969d88dd59254" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.068487 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.080745 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.114494 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:37 crc kubenswrapper[4954]: E1128 16:35:37.115035 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3314fcc-d027-4e35-8739-b7f590c0cfb9" containerName="nova-api-api" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.115058 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3314fcc-d027-4e35-8739-b7f590c0cfb9" containerName="nova-api-api" Nov 28 16:35:37 crc kubenswrapper[4954]: E1128 16:35:37.115162 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3314fcc-d027-4e35-8739-b7f590c0cfb9" containerName="nova-api-log" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.115171 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3314fcc-d027-4e35-8739-b7f590c0cfb9" containerName="nova-api-log" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.115394 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3314fcc-d027-4e35-8739-b7f590c0cfb9" containerName="nova-api-api" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.115431 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3314fcc-d027-4e35-8739-b7f590c0cfb9" containerName="nova-api-log" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.116858 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.122747 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.122848 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.122848 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.152374 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.259030 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7sfsv\" (UniqueName: \"kubernetes.io/projected/08079426-ad38-42d6-a577-b3f6a39fcf7b-kube-api-access-7sfsv\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.259349 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-public-tls-certs\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.259370 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-config-data\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.259814 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08079426-ad38-42d6-a577-b3f6a39fcf7b-logs\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.259865 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.259891 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.361429 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7sfsv\" (UniqueName: \"kubernetes.io/projected/08079426-ad38-42d6-a577-b3f6a39fcf7b-kube-api-access-7sfsv\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.361483 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-public-tls-certs\") pod 
\"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.361504 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-config-data\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.361562 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08079426-ad38-42d6-a577-b3f6a39fcf7b-logs\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.361601 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.361624 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.362694 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08079426-ad38-42d6-a577-b3f6a39fcf7b-logs\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.366060 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.366360 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-public-tls-certs\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.366996 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.368208 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-config-data\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.382493 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7sfsv\" (UniqueName: \"kubernetes.io/projected/08079426-ad38-42d6-a577-b3f6a39fcf7b-kube-api-access-7sfsv\") pod \"nova-api-0\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " pod="openstack/nova-api-0" Nov 
28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.464518 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.882880 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3314fcc-d027-4e35-8739-b7f590c0cfb9" path="/var/lib/kubelet/pods/b3314fcc-d027-4e35-8739-b7f590c0cfb9/volumes" Nov 28 16:35:37 crc kubenswrapper[4954]: I1128 16:35:37.991059 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:38 crc kubenswrapper[4954]: I1128 16:35:38.027080 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7cef279d-a444-456d-8add-1998974a6e08","Type":"ContainerStarted","Data":"190064bf648d40792650ba482e7ac25d6d7c2fabc259baf1d0b1b45c5abca205"} Nov 28 16:35:38 crc kubenswrapper[4954]: I1128 16:35:38.027261 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7cef279d-a444-456d-8add-1998974a6e08","Type":"ContainerStarted","Data":"f3b3be1f06f6ac14c8e5f8001d2e6742a30e971699d14e2b691a416c75f5f699"} Nov 28 16:35:38 crc kubenswrapper[4954]: I1128 16:35:38.029339 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"08079426-ad38-42d6-a577-b3f6a39fcf7b","Type":"ContainerStarted","Data":"6c5072715e1598a68b9e8faecfbf0e4f7e5934a8f284555426656acced79ffdf"} Nov 28 16:35:38 crc kubenswrapper[4954]: I1128 16:35:38.032411 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"43af6ac7-70c5-43ff-84a0-0f6b6159ae66","Type":"ContainerStarted","Data":"b45576e777726ad60fd11ec47af67513edf0fef268b3da28b29e6246985b5775"} Nov 28 16:35:38 crc kubenswrapper[4954]: I1128 16:35:38.032477 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"43af6ac7-70c5-43ff-84a0-0f6b6159ae66","Type":"ContainerStarted","Data":"5fcfd5dc09dbb15a96c3f82dc25914b5ddddbb38994aec53a9e84ffb97651a0b"} Nov 28 16:35:38 crc kubenswrapper[4954]: I1128 16:35:38.032824 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 28 16:35:38 crc kubenswrapper[4954]: I1128 16:35:38.057902 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=3.05787934 podStartE2EDuration="3.05787934s" podCreationTimestamp="2025-11-28 16:35:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:35:38.053599213 +0000 UTC m=+1491.445267764" watchObservedRunningTime="2025-11-28 16:35:38.05787934 +0000 UTC m=+1491.449547881" Nov 28 16:35:38 crc kubenswrapper[4954]: I1128 16:35:38.273202 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:39 crc kubenswrapper[4954]: I1128 16:35:39.067110 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7cef279d-a444-456d-8add-1998974a6e08","Type":"ContainerStarted","Data":"12e64b79d883c0960f7d869751df2054fc61218ac1ea6063c9a82354936f8e17"} Nov 28 16:35:39 crc kubenswrapper[4954]: I1128 16:35:39.078821 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"08079426-ad38-42d6-a577-b3f6a39fcf7b","Type":"ContainerStarted","Data":"0fd0232fabbca0961d47fa5bcd1b8efb037dba790423f7d845dda029f0e363b7"} Nov 28 16:35:39 crc kubenswrapper[4954]: I1128 16:35:39.078870 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"08079426-ad38-42d6-a577-b3f6a39fcf7b","Type":"ContainerStarted","Data":"6c2fe3e317b6db7af3eef9438d7e70a3f49e296683477148b5dbea4ff402eb08"} Nov 28 16:35:39 crc kubenswrapper[4954]: I1128 16:35:39.303856 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:35:39 crc kubenswrapper[4954]: I1128 16:35:39.333648 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.333619714 podStartE2EDuration="2.333619714s" podCreationTimestamp="2025-11-28 16:35:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:35:39.109440626 +0000 UTC m=+1492.501109167" watchObservedRunningTime="2025-11-28 16:35:39.333619714 +0000 UTC m=+1492.725288255" Nov 28 16:35:39 crc kubenswrapper[4954]: I1128 16:35:39.378116 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-5h2vz"] Nov 28 16:35:39 crc kubenswrapper[4954]: I1128 16:35:39.379703 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" podUID="5f6d0ffb-cdea-415b-8e0d-3d93e8292eed" containerName="dnsmasq-dns" containerID="cri-o://dc91b1e18bd26c3630bb8350e1cd0990f680d3418d18bfaa0bb4a46d379b20a5" gracePeriod=10 Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.013923 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.035584 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-dns-swift-storage-0\") pod \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.035668 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-ovsdbserver-sb\") pod \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.035772 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-dns-svc\") pod \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.035806 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-config\") pod \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.035954 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfbgw\" (UniqueName: \"kubernetes.io/projected/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-kube-api-access-gfbgw\") pod \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.036049 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-ovsdbserver-nb\") pod \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\" (UID: \"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed\") " Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.078002 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-kube-api-access-gfbgw" (OuterVolumeSpecName: "kube-api-access-gfbgw") pod "5f6d0ffb-cdea-415b-8e0d-3d93e8292eed" (UID: "5f6d0ffb-cdea-415b-8e0d-3d93e8292eed"). InnerVolumeSpecName "kube-api-access-gfbgw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.121515 4954 generic.go:334] "Generic (PLEG): container finished" podID="5f6d0ffb-cdea-415b-8e0d-3d93e8292eed" containerID="dc91b1e18bd26c3630bb8350e1cd0990f680d3418d18bfaa0bb4a46d379b20a5" exitCode=0 Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.123001 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.123789 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" event={"ID":"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed","Type":"ContainerDied","Data":"dc91b1e18bd26c3630bb8350e1cd0990f680d3418d18bfaa0bb4a46d379b20a5"} Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.123830 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-5h2vz" event={"ID":"5f6d0ffb-cdea-415b-8e0d-3d93e8292eed","Type":"ContainerDied","Data":"412cee30518e9bdc6e65fabc00ac096214e3384f594333d4cc75e745ef35a3e6"} Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.123876 4954 scope.go:117] "RemoveContainer" containerID="dc91b1e18bd26c3630bb8350e1cd0990f680d3418d18bfaa0bb4a46d379b20a5" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.140121 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfbgw\" (UniqueName: \"kubernetes.io/projected/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-kube-api-access-gfbgw\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.148316 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-config" (OuterVolumeSpecName: "config") pod "5f6d0ffb-cdea-415b-8e0d-3d93e8292eed" (UID: "5f6d0ffb-cdea-415b-8e0d-3d93e8292eed"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.213605 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "5f6d0ffb-cdea-415b-8e0d-3d93e8292eed" (UID: "5f6d0ffb-cdea-415b-8e0d-3d93e8292eed"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.218260 4954 scope.go:117] "RemoveContainer" containerID="d07e47f0469040d37fedb491ed123de77de8ea4c27aca19cfb9e5a922e45f496" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.222159 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5f6d0ffb-cdea-415b-8e0d-3d93e8292eed" (UID: "5f6d0ffb-cdea-415b-8e0d-3d93e8292eed"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.222332 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5f6d0ffb-cdea-415b-8e0d-3d93e8292eed" (UID: "5f6d0ffb-cdea-415b-8e0d-3d93e8292eed"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.239051 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5f6d0ffb-cdea-415b-8e0d-3d93e8292eed" (UID: "5f6d0ffb-cdea-415b-8e0d-3d93e8292eed"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.242677 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.242712 4954 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.242727 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.242740 4954 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.242751 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.258999 4954 scope.go:117] "RemoveContainer" containerID="dc91b1e18bd26c3630bb8350e1cd0990f680d3418d18bfaa0bb4a46d379b20a5" Nov 28 16:35:40 crc kubenswrapper[4954]: E1128 16:35:40.259556 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc91b1e18bd26c3630bb8350e1cd0990f680d3418d18bfaa0bb4a46d379b20a5\": container with ID starting with dc91b1e18bd26c3630bb8350e1cd0990f680d3418d18bfaa0bb4a46d379b20a5 not found: ID does not exist" containerID="dc91b1e18bd26c3630bb8350e1cd0990f680d3418d18bfaa0bb4a46d379b20a5" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.259596 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc91b1e18bd26c3630bb8350e1cd0990f680d3418d18bfaa0bb4a46d379b20a5"} err="failed to get container status \"dc91b1e18bd26c3630bb8350e1cd0990f680d3418d18bfaa0bb4a46d379b20a5\": rpc error: code = NotFound desc = could not find container \"dc91b1e18bd26c3630bb8350e1cd0990f680d3418d18bfaa0bb4a46d379b20a5\": container with ID starting with dc91b1e18bd26c3630bb8350e1cd0990f680d3418d18bfaa0bb4a46d379b20a5 not found: ID does not exist" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.259622 4954 scope.go:117] "RemoveContainer" containerID="d07e47f0469040d37fedb491ed123de77de8ea4c27aca19cfb9e5a922e45f496" Nov 28 16:35:40 crc kubenswrapper[4954]: E1128 16:35:40.260192 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d07e47f0469040d37fedb491ed123de77de8ea4c27aca19cfb9e5a922e45f496\": container with ID starting with d07e47f0469040d37fedb491ed123de77de8ea4c27aca19cfb9e5a922e45f496 not found: ID does not exist" containerID="d07e47f0469040d37fedb491ed123de77de8ea4c27aca19cfb9e5a922e45f496" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.260509 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d07e47f0469040d37fedb491ed123de77de8ea4c27aca19cfb9e5a922e45f496"} err="failed to get container status 
\"d07e47f0469040d37fedb491ed123de77de8ea4c27aca19cfb9e5a922e45f496\": rpc error: code = NotFound desc = could not find container \"d07e47f0469040d37fedb491ed123de77de8ea4c27aca19cfb9e5a922e45f496\": container with ID starting with d07e47f0469040d37fedb491ed123de77de8ea4c27aca19cfb9e5a922e45f496 not found: ID does not exist" Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.461582 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-5h2vz"] Nov 28 16:35:40 crc kubenswrapper[4954]: I1128 16:35:40.471306 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-5h2vz"] Nov 28 16:35:41 crc kubenswrapper[4954]: I1128 16:35:41.134734 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7cef279d-a444-456d-8add-1998974a6e08","Type":"ContainerStarted","Data":"eec27ebb646c765f0cde3b186b7adb59c251c51c9a65684667cfbc3c2314c2a3"} Nov 28 16:35:41 crc kubenswrapper[4954]: I1128 16:35:41.135544 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 16:35:41 crc kubenswrapper[4954]: I1128 16:35:41.163931 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.960855729 podStartE2EDuration="7.163909889s" podCreationTimestamp="2025-11-28 16:35:34 +0000 UTC" firstStartedPulling="2025-11-28 16:35:35.817259055 +0000 UTC m=+1489.208927596" lastFinishedPulling="2025-11-28 16:35:40.020313215 +0000 UTC m=+1493.411981756" observedRunningTime="2025-11-28 16:35:41.161963046 +0000 UTC m=+1494.553631587" watchObservedRunningTime="2025-11-28 16:35:41.163909889 +0000 UTC m=+1494.555578440" Nov 28 16:35:41 crc kubenswrapper[4954]: I1128 16:35:41.868786 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f6d0ffb-cdea-415b-8e0d-3d93e8292eed" path="/var/lib/kubelet/pods/5f6d0ffb-cdea-415b-8e0d-3d93e8292eed/volumes" Nov 28 16:35:43 crc kubenswrapper[4954]: I1128 16:35:43.273253 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:43 crc kubenswrapper[4954]: I1128 16:35:43.295074 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:44 crc kubenswrapper[4954]: I1128 16:35:44.185630 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.339905 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.847778 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-5d8sw"] Nov 28 16:35:46 crc kubenswrapper[4954]: E1128 16:35:46.848577 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f6d0ffb-cdea-415b-8e0d-3d93e8292eed" containerName="init" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.848598 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f6d0ffb-cdea-415b-8e0d-3d93e8292eed" containerName="init" Nov 28 16:35:46 crc kubenswrapper[4954]: E1128 16:35:46.848633 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f6d0ffb-cdea-415b-8e0d-3d93e8292eed" containerName="dnsmasq-dns" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.848645 4954 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="5f6d0ffb-cdea-415b-8e0d-3d93e8292eed" containerName="dnsmasq-dns" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.848918 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f6d0ffb-cdea-415b-8e0d-3d93e8292eed" containerName="dnsmasq-dns" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.849753 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.854230 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.854589 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.866504 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-5d8sw"] Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.880363 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-config-data\") pod \"nova-cell1-cell-mapping-5d8sw\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.880447 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dq9xp\" (UniqueName: \"kubernetes.io/projected/5cf8cff4-ff61-4663-a23b-d68532916ee3-kube-api-access-dq9xp\") pod \"nova-cell1-cell-mapping-5d8sw\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.880544 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-scripts\") pod \"nova-cell1-cell-mapping-5d8sw\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.880636 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-5d8sw\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.982467 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-config-data\") pod \"nova-cell1-cell-mapping-5d8sw\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.982579 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dq9xp\" (UniqueName: \"kubernetes.io/projected/5cf8cff4-ff61-4663-a23b-d68532916ee3-kube-api-access-dq9xp\") pod \"nova-cell1-cell-mapping-5d8sw\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.982641 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-scripts\") pod \"nova-cell1-cell-mapping-5d8sw\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.983890 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-5d8sw\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.989502 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-scripts\") pod \"nova-cell1-cell-mapping-5d8sw\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.990794 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-5d8sw\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:46 crc kubenswrapper[4954]: I1128 16:35:46.991222 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-config-data\") pod \"nova-cell1-cell-mapping-5d8sw\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:47 crc kubenswrapper[4954]: I1128 16:35:47.001341 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dq9xp\" (UniqueName: \"kubernetes.io/projected/5cf8cff4-ff61-4663-a23b-d68532916ee3-kube-api-access-dq9xp\") pod \"nova-cell1-cell-mapping-5d8sw\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:47 crc kubenswrapper[4954]: I1128 16:35:47.173608 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:47 crc kubenswrapper[4954]: I1128 16:35:47.465983 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:35:47 crc kubenswrapper[4954]: I1128 16:35:47.466328 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:35:47 crc kubenswrapper[4954]: W1128 16:35:47.653577 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5cf8cff4_ff61_4663_a23b_d68532916ee3.slice/crio-b65b036195ef6d582929bd91a8ab9afbd52782d86afa83fc405c5d1ca87a9a73 WatchSource:0}: Error finding container b65b036195ef6d582929bd91a8ab9afbd52782d86afa83fc405c5d1ca87a9a73: Status 404 returned error can't find the container with id b65b036195ef6d582929bd91a8ab9afbd52782d86afa83fc405c5d1ca87a9a73 Nov 28 16:35:47 crc kubenswrapper[4954]: I1128 16:35:47.656357 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-5d8sw"] Nov 28 16:35:48 crc kubenswrapper[4954]: I1128 16:35:48.211617 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5d8sw" event={"ID":"5cf8cff4-ff61-4663-a23b-d68532916ee3","Type":"ContainerStarted","Data":"f395a2e9a2f791976be90eae75233a52a92153cf8e478c7bc3edfa54acb7f9ea"} Nov 28 16:35:48 crc kubenswrapper[4954]: I1128 16:35:48.211968 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5d8sw" event={"ID":"5cf8cff4-ff61-4663-a23b-d68532916ee3","Type":"ContainerStarted","Data":"b65b036195ef6d582929bd91a8ab9afbd52782d86afa83fc405c5d1ca87a9a73"} Nov 28 16:35:48 crc kubenswrapper[4954]: I1128 16:35:48.242218 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-5d8sw" podStartSLOduration=2.242187295 podStartE2EDuration="2.242187295s" podCreationTimestamp="2025-11-28 16:35:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:35:48.228475643 +0000 UTC m=+1501.620144184" watchObservedRunningTime="2025-11-28 16:35:48.242187295 +0000 UTC m=+1501.633855846" Nov 28 16:35:48 crc kubenswrapper[4954]: I1128 16:35:48.490912 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="08079426-ad38-42d6-a577-b3f6a39fcf7b" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.200:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:35:48 crc kubenswrapper[4954]: I1128 16:35:48.490990 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="08079426-ad38-42d6-a577-b3f6a39fcf7b" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.200:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:35:53 crc kubenswrapper[4954]: I1128 16:35:53.263150 4954 generic.go:334] "Generic (PLEG): container finished" podID="5cf8cff4-ff61-4663-a23b-d68532916ee3" containerID="f395a2e9a2f791976be90eae75233a52a92153cf8e478c7bc3edfa54acb7f9ea" exitCode=0 Nov 28 16:35:53 crc kubenswrapper[4954]: I1128 16:35:53.263297 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5d8sw" 
event={"ID":"5cf8cff4-ff61-4663-a23b-d68532916ee3","Type":"ContainerDied","Data":"f395a2e9a2f791976be90eae75233a52a92153cf8e478c7bc3edfa54acb7f9ea"} Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:54.676034 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:54.830395 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-scripts\") pod \"5cf8cff4-ff61-4663-a23b-d68532916ee3\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:54.830691 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-combined-ca-bundle\") pod \"5cf8cff4-ff61-4663-a23b-d68532916ee3\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:54.830729 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dq9xp\" (UniqueName: \"kubernetes.io/projected/5cf8cff4-ff61-4663-a23b-d68532916ee3-kube-api-access-dq9xp\") pod \"5cf8cff4-ff61-4663-a23b-d68532916ee3\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:54.830868 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-config-data\") pod \"5cf8cff4-ff61-4663-a23b-d68532916ee3\" (UID: \"5cf8cff4-ff61-4663-a23b-d68532916ee3\") " Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:54.835337 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-scripts" (OuterVolumeSpecName: "scripts") pod "5cf8cff4-ff61-4663-a23b-d68532916ee3" (UID: "5cf8cff4-ff61-4663-a23b-d68532916ee3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:54.838737 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5cf8cff4-ff61-4663-a23b-d68532916ee3-kube-api-access-dq9xp" (OuterVolumeSpecName: "kube-api-access-dq9xp") pod "5cf8cff4-ff61-4663-a23b-d68532916ee3" (UID: "5cf8cff4-ff61-4663-a23b-d68532916ee3"). InnerVolumeSpecName "kube-api-access-dq9xp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:54.863130 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5cf8cff4-ff61-4663-a23b-d68532916ee3" (UID: "5cf8cff4-ff61-4663-a23b-d68532916ee3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:54.865805 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-config-data" (OuterVolumeSpecName: "config-data") pod "5cf8cff4-ff61-4663-a23b-d68532916ee3" (UID: "5cf8cff4-ff61-4663-a23b-d68532916ee3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:54.933544 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:54.933578 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:54.933617 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5cf8cff4-ff61-4663-a23b-d68532916ee3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:54.933636 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dq9xp\" (UniqueName: \"kubernetes.io/projected/5cf8cff4-ff61-4663-a23b-d68532916ee3-kube-api-access-dq9xp\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:55.285414 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-5d8sw" event={"ID":"5cf8cff4-ff61-4663-a23b-d68532916ee3","Type":"ContainerDied","Data":"b65b036195ef6d582929bd91a8ab9afbd52782d86afa83fc405c5d1ca87a9a73"} Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:55.285868 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b65b036195ef6d582929bd91a8ab9afbd52782d86afa83fc405c5d1ca87a9a73" Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:55.285476 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-5d8sw" Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:55.471451 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:55.472043 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="08079426-ad38-42d6-a577-b3f6a39fcf7b" containerName="nova-api-log" containerID="cri-o://6c2fe3e317b6db7af3eef9438d7e70a3f49e296683477148b5dbea4ff402eb08" gracePeriod=30 Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:55.472550 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="08079426-ad38-42d6-a577-b3f6a39fcf7b" containerName="nova-api-api" containerID="cri-o://0fd0232fabbca0961d47fa5bcd1b8efb037dba790423f7d845dda029f0e363b7" gracePeriod=30 Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:55.505158 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:55.505397 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="8eae8ee3-3bd5-4463-b936-a237fbf26d2a" containerName="nova-scheduler-scheduler" containerID="cri-o://58fc661e399a38dfc54e41dbaffdaef877ddb21d80fbfae544a83c1987cc0522" gracePeriod=30 Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:55.517785 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:55.518251 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="24190ac6-aa40-4fcf-8722-f86426458655" 
containerName="nova-metadata-log" containerID="cri-o://9fa483e2242b4a74d8a2ac17f44066422f609f97c99ae94d124d88ea13944ec8" gracePeriod=30 Nov 28 16:35:55 crc kubenswrapper[4954]: I1128 16:35:55.518297 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="24190ac6-aa40-4fcf-8722-f86426458655" containerName="nova-metadata-metadata" containerID="cri-o://53ef55a7695eb41cb8d3fc7b1d0ab0da9f56289fa26581dd8c718bc8e22a24b0" gracePeriod=30 Nov 28 16:35:56 crc kubenswrapper[4954]: I1128 16:35:56.322176 4954 generic.go:334] "Generic (PLEG): container finished" podID="08079426-ad38-42d6-a577-b3f6a39fcf7b" containerID="6c2fe3e317b6db7af3eef9438d7e70a3f49e296683477148b5dbea4ff402eb08" exitCode=143 Nov 28 16:35:56 crc kubenswrapper[4954]: I1128 16:35:56.322464 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"08079426-ad38-42d6-a577-b3f6a39fcf7b","Type":"ContainerDied","Data":"6c2fe3e317b6db7af3eef9438d7e70a3f49e296683477148b5dbea4ff402eb08"} Nov 28 16:35:56 crc kubenswrapper[4954]: I1128 16:35:56.325428 4954 generic.go:334] "Generic (PLEG): container finished" podID="24190ac6-aa40-4fcf-8722-f86426458655" containerID="9fa483e2242b4a74d8a2ac17f44066422f609f97c99ae94d124d88ea13944ec8" exitCode=143 Nov 28 16:35:56 crc kubenswrapper[4954]: I1128 16:35:56.325591 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"24190ac6-aa40-4fcf-8722-f86426458655","Type":"ContainerDied","Data":"9fa483e2242b4a74d8a2ac17f44066422f609f97c99ae94d124d88ea13944ec8"} Nov 28 16:35:58 crc kubenswrapper[4954]: I1128 16:35:58.658669 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="24190ac6-aa40-4fcf-8722-f86426458655" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": read tcp 10.217.0.2:43828->10.217.0.191:8775: read: connection reset by peer" Nov 28 16:35:58 crc kubenswrapper[4954]: I1128 16:35:58.659919 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="24190ac6-aa40-4fcf-8722-f86426458655" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.191:8775/\": read tcp 10.217.0.2:43824->10.217.0.191:8775: read: connection reset by peer" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.114426 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.312870 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkqzr\" (UniqueName: \"kubernetes.io/projected/24190ac6-aa40-4fcf-8722-f86426458655-kube-api-access-pkqzr\") pod \"24190ac6-aa40-4fcf-8722-f86426458655\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.313272 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-nova-metadata-tls-certs\") pod \"24190ac6-aa40-4fcf-8722-f86426458655\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.313364 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-combined-ca-bundle\") pod \"24190ac6-aa40-4fcf-8722-f86426458655\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.313405 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-config-data\") pod \"24190ac6-aa40-4fcf-8722-f86426458655\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.313493 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24190ac6-aa40-4fcf-8722-f86426458655-logs\") pod \"24190ac6-aa40-4fcf-8722-f86426458655\" (UID: \"24190ac6-aa40-4fcf-8722-f86426458655\") " Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.315436 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24190ac6-aa40-4fcf-8722-f86426458655-logs" (OuterVolumeSpecName: "logs") pod "24190ac6-aa40-4fcf-8722-f86426458655" (UID: "24190ac6-aa40-4fcf-8722-f86426458655"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.321812 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24190ac6-aa40-4fcf-8722-f86426458655-kube-api-access-pkqzr" (OuterVolumeSpecName: "kube-api-access-pkqzr") pod "24190ac6-aa40-4fcf-8722-f86426458655" (UID: "24190ac6-aa40-4fcf-8722-f86426458655"). InnerVolumeSpecName "kube-api-access-pkqzr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.375632 4954 generic.go:334] "Generic (PLEG): container finished" podID="24190ac6-aa40-4fcf-8722-f86426458655" containerID="53ef55a7695eb41cb8d3fc7b1d0ab0da9f56289fa26581dd8c718bc8e22a24b0" exitCode=0 Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.375744 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"24190ac6-aa40-4fcf-8722-f86426458655","Type":"ContainerDied","Data":"53ef55a7695eb41cb8d3fc7b1d0ab0da9f56289fa26581dd8c718bc8e22a24b0"} Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.375781 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"24190ac6-aa40-4fcf-8722-f86426458655","Type":"ContainerDied","Data":"2fdbf8ebd7a580d447e6cac86a33a27326a4bb797380b2021583483db4e6ebbc"} Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.375803 4954 scope.go:117] "RemoveContainer" containerID="53ef55a7695eb41cb8d3fc7b1d0ab0da9f56289fa26581dd8c718bc8e22a24b0" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.375985 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.381060 4954 generic.go:334] "Generic (PLEG): container finished" podID="08079426-ad38-42d6-a577-b3f6a39fcf7b" containerID="0fd0232fabbca0961d47fa5bcd1b8efb037dba790423f7d845dda029f0e363b7" exitCode=0 Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.381103 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"08079426-ad38-42d6-a577-b3f6a39fcf7b","Type":"ContainerDied","Data":"0fd0232fabbca0961d47fa5bcd1b8efb037dba790423f7d845dda029f0e363b7"} Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.382702 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-config-data" (OuterVolumeSpecName: "config-data") pod "24190ac6-aa40-4fcf-8722-f86426458655" (UID: "24190ac6-aa40-4fcf-8722-f86426458655"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.383711 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "24190ac6-aa40-4fcf-8722-f86426458655" (UID: "24190ac6-aa40-4fcf-8722-f86426458655"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.388785 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "24190ac6-aa40-4fcf-8722-f86426458655" (UID: "24190ac6-aa40-4fcf-8722-f86426458655"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.404057 4954 scope.go:117] "RemoveContainer" containerID="9fa483e2242b4a74d8a2ac17f44066422f609f97c99ae94d124d88ea13944ec8" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.417100 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkqzr\" (UniqueName: \"kubernetes.io/projected/24190ac6-aa40-4fcf-8722-f86426458655-kube-api-access-pkqzr\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.417131 4954 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.417141 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.417150 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24190ac6-aa40-4fcf-8722-f86426458655-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.417160 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24190ac6-aa40-4fcf-8722-f86426458655-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.433892 4954 scope.go:117] "RemoveContainer" containerID="53ef55a7695eb41cb8d3fc7b1d0ab0da9f56289fa26581dd8c718bc8e22a24b0" Nov 28 16:35:59 crc kubenswrapper[4954]: E1128 16:35:59.434613 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53ef55a7695eb41cb8d3fc7b1d0ab0da9f56289fa26581dd8c718bc8e22a24b0\": container with ID starting with 53ef55a7695eb41cb8d3fc7b1d0ab0da9f56289fa26581dd8c718bc8e22a24b0 not found: ID does not exist" containerID="53ef55a7695eb41cb8d3fc7b1d0ab0da9f56289fa26581dd8c718bc8e22a24b0" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.434649 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53ef55a7695eb41cb8d3fc7b1d0ab0da9f56289fa26581dd8c718bc8e22a24b0"} err="failed to get container status \"53ef55a7695eb41cb8d3fc7b1d0ab0da9f56289fa26581dd8c718bc8e22a24b0\": rpc error: code = NotFound desc = could not find container \"53ef55a7695eb41cb8d3fc7b1d0ab0da9f56289fa26581dd8c718bc8e22a24b0\": container with ID starting with 53ef55a7695eb41cb8d3fc7b1d0ab0da9f56289fa26581dd8c718bc8e22a24b0 not found: ID does not exist" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.434671 4954 scope.go:117] "RemoveContainer" containerID="9fa483e2242b4a74d8a2ac17f44066422f609f97c99ae94d124d88ea13944ec8" Nov 28 16:35:59 crc kubenswrapper[4954]: E1128 16:35:59.435213 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9fa483e2242b4a74d8a2ac17f44066422f609f97c99ae94d124d88ea13944ec8\": container with ID starting with 9fa483e2242b4a74d8a2ac17f44066422f609f97c99ae94d124d88ea13944ec8 not found: ID does not exist" containerID="9fa483e2242b4a74d8a2ac17f44066422f609f97c99ae94d124d88ea13944ec8" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.435266 4954 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fa483e2242b4a74d8a2ac17f44066422f609f97c99ae94d124d88ea13944ec8"} err="failed to get container status \"9fa483e2242b4a74d8a2ac17f44066422f609f97c99ae94d124d88ea13944ec8\": rpc error: code = NotFound desc = could not find container \"9fa483e2242b4a74d8a2ac17f44066422f609f97c99ae94d124d88ea13944ec8\": container with ID starting with 9fa483e2242b4a74d8a2ac17f44066422f609f97c99ae94d124d88ea13944ec8 not found: ID does not exist" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.606074 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.722371 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-public-tls-certs\") pod \"08079426-ad38-42d6-a577-b3f6a39fcf7b\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.722575 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-combined-ca-bundle\") pod \"08079426-ad38-42d6-a577-b3f6a39fcf7b\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.722714 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-internal-tls-certs\") pod \"08079426-ad38-42d6-a577-b3f6a39fcf7b\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.722762 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7sfsv\" (UniqueName: \"kubernetes.io/projected/08079426-ad38-42d6-a577-b3f6a39fcf7b-kube-api-access-7sfsv\") pod \"08079426-ad38-42d6-a577-b3f6a39fcf7b\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.722790 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08079426-ad38-42d6-a577-b3f6a39fcf7b-logs\") pod \"08079426-ad38-42d6-a577-b3f6a39fcf7b\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.722832 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-config-data\") pod \"08079426-ad38-42d6-a577-b3f6a39fcf7b\" (UID: \"08079426-ad38-42d6-a577-b3f6a39fcf7b\") " Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.724729 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08079426-ad38-42d6-a577-b3f6a39fcf7b-logs" (OuterVolumeSpecName: "logs") pod "08079426-ad38-42d6-a577-b3f6a39fcf7b" (UID: "08079426-ad38-42d6-a577-b3f6a39fcf7b"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.726292 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.727259 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08079426-ad38-42d6-a577-b3f6a39fcf7b-kube-api-access-7sfsv" (OuterVolumeSpecName: "kube-api-access-7sfsv") pod "08079426-ad38-42d6-a577-b3f6a39fcf7b" (UID: "08079426-ad38-42d6-a577-b3f6a39fcf7b"). InnerVolumeSpecName "kube-api-access-7sfsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.746732 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.758188 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:59 crc kubenswrapper[4954]: E1128 16:35:59.762958 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08079426-ad38-42d6-a577-b3f6a39fcf7b" containerName="nova-api-log" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.763427 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="08079426-ad38-42d6-a577-b3f6a39fcf7b" containerName="nova-api-log" Nov 28 16:35:59 crc kubenswrapper[4954]: E1128 16:35:59.763520 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24190ac6-aa40-4fcf-8722-f86426458655" containerName="nova-metadata-metadata" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.763623 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="24190ac6-aa40-4fcf-8722-f86426458655" containerName="nova-metadata-metadata" Nov 28 16:35:59 crc kubenswrapper[4954]: E1128 16:35:59.763711 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cf8cff4-ff61-4663-a23b-d68532916ee3" containerName="nova-manage" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.763797 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cf8cff4-ff61-4663-a23b-d68532916ee3" containerName="nova-manage" Nov 28 16:35:59 crc kubenswrapper[4954]: E1128 16:35:59.763874 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24190ac6-aa40-4fcf-8722-f86426458655" containerName="nova-metadata-log" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.764000 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="24190ac6-aa40-4fcf-8722-f86426458655" containerName="nova-metadata-log" Nov 28 16:35:59 crc kubenswrapper[4954]: E1128 16:35:59.764081 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08079426-ad38-42d6-a577-b3f6a39fcf7b" containerName="nova-api-api" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.764158 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="08079426-ad38-42d6-a577-b3f6a39fcf7b" containerName="nova-api-api" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.764079 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-config-data" (OuterVolumeSpecName: "config-data") pod "08079426-ad38-42d6-a577-b3f6a39fcf7b" (UID: "08079426-ad38-42d6-a577-b3f6a39fcf7b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.764628 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="08079426-ad38-42d6-a577-b3f6a39fcf7b" containerName="nova-api-log" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.764718 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="24190ac6-aa40-4fcf-8722-f86426458655" containerName="nova-metadata-metadata" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.764797 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="08079426-ad38-42d6-a577-b3f6a39fcf7b" containerName="nova-api-api" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.764863 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="5cf8cff4-ff61-4663-a23b-d68532916ee3" containerName="nova-manage" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.764929 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="24190ac6-aa40-4fcf-8722-f86426458655" containerName="nova-metadata-log" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.766352 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.768839 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.769212 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.793087 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.802920 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "08079426-ad38-42d6-a577-b3f6a39fcf7b" (UID: "08079426-ad38-42d6-a577-b3f6a39fcf7b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.809256 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "08079426-ad38-42d6-a577-b3f6a39fcf7b" (UID: "08079426-ad38-42d6-a577-b3f6a39fcf7b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.818103 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "08079426-ad38-42d6-a577-b3f6a39fcf7b" (UID: "08079426-ad38-42d6-a577-b3f6a39fcf7b"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.825298 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.825335 4954 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.825346 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7sfsv\" (UniqueName: \"kubernetes.io/projected/08079426-ad38-42d6-a577-b3f6a39fcf7b-kube-api-access-7sfsv\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.825357 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08079426-ad38-42d6-a577-b3f6a39fcf7b-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.825367 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.825376 4954 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/08079426-ad38-42d6-a577-b3f6a39fcf7b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.867619 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24190ac6-aa40-4fcf-8722-f86426458655" path="/var/lib/kubelet/pods/24190ac6-aa40-4fcf-8722-f86426458655/volumes" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.927083 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-config-data\") pod \"nova-metadata-0\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " pod="openstack/nova-metadata-0" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.927234 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/642e0138-17d8-47e0-a67d-51a758291f7e-logs\") pod \"nova-metadata-0\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " pod="openstack/nova-metadata-0" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.927349 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " pod="openstack/nova-metadata-0" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.927400 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfvxt\" (UniqueName: \"kubernetes.io/projected/642e0138-17d8-47e0-a67d-51a758291f7e-kube-api-access-kfvxt\") pod \"nova-metadata-0\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " pod="openstack/nova-metadata-0" Nov 28 16:35:59 crc kubenswrapper[4954]: I1128 16:35:59.927483 4954 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " pod="openstack/nova-metadata-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.028959 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/642e0138-17d8-47e0-a67d-51a758291f7e-logs\") pod \"nova-metadata-0\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " pod="openstack/nova-metadata-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.029047 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " pod="openstack/nova-metadata-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.029076 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfvxt\" (UniqueName: \"kubernetes.io/projected/642e0138-17d8-47e0-a67d-51a758291f7e-kube-api-access-kfvxt\") pod \"nova-metadata-0\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " pod="openstack/nova-metadata-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.029140 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " pod="openstack/nova-metadata-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.029179 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-config-data\") pod \"nova-metadata-0\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " pod="openstack/nova-metadata-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.029788 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/642e0138-17d8-47e0-a67d-51a758291f7e-logs\") pod \"nova-metadata-0\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " pod="openstack/nova-metadata-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.032865 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " pod="openstack/nova-metadata-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.033199 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " pod="openstack/nova-metadata-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.037411 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-config-data\") pod \"nova-metadata-0\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " pod="openstack/nova-metadata-0" Nov 28 16:36:00 crc 
kubenswrapper[4954]: I1128 16:36:00.044323 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfvxt\" (UniqueName: \"kubernetes.io/projected/642e0138-17d8-47e0-a67d-51a758291f7e-kube-api-access-kfvxt\") pod \"nova-metadata-0\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " pod="openstack/nova-metadata-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.211819 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:36:00 crc kubenswrapper[4954]: E1128 16:36:00.399760 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58fc661e399a38dfc54e41dbaffdaef877ddb21d80fbfae544a83c1987cc0522" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:36:00 crc kubenswrapper[4954]: E1128 16:36:00.407501 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58fc661e399a38dfc54e41dbaffdaef877ddb21d80fbfae544a83c1987cc0522" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:36:00 crc kubenswrapper[4954]: E1128 16:36:00.411086 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58fc661e399a38dfc54e41dbaffdaef877ddb21d80fbfae544a83c1987cc0522" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:36:00 crc kubenswrapper[4954]: E1128 16:36:00.411165 4954 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="8eae8ee3-3bd5-4463-b936-a237fbf26d2a" containerName="nova-scheduler-scheduler" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.420103 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"08079426-ad38-42d6-a577-b3f6a39fcf7b","Type":"ContainerDied","Data":"6c5072715e1598a68b9e8faecfbf0e4f7e5934a8f284555426656acced79ffdf"} Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.420127 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.420160 4954 scope.go:117] "RemoveContainer" containerID="0fd0232fabbca0961d47fa5bcd1b8efb037dba790423f7d845dda029f0e363b7" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.474747 4954 scope.go:117] "RemoveContainer" containerID="6c2fe3e317b6db7af3eef9438d7e70a3f49e296683477148b5dbea4ff402eb08" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.498408 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.525336 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.536686 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.538462 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.542053 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.542195 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.542275 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.547761 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.639164 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-config-data\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.639249 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-public-tls-certs\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.639274 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-logs\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.639378 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.639471 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdglh\" (UniqueName: \"kubernetes.io/projected/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-kube-api-access-hdglh\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.639539 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-internal-tls-certs\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: W1128 16:36:00.691297 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod642e0138_17d8_47e0_a67d_51a758291f7e.slice/crio-c8c37ba9170312790571f11b8cb1cfccb49ce1f56757e7a5b453681557696a42 WatchSource:0}: Error finding container c8c37ba9170312790571f11b8cb1cfccb49ce1f56757e7a5b453681557696a42: Status 404 returned error can't find the container with id c8c37ba9170312790571f11b8cb1cfccb49ce1f56757e7a5b453681557696a42 Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.693519 4954 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.741156 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-internal-tls-certs\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.741216 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-config-data\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.741296 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-public-tls-certs\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.741322 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-logs\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.741384 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.741451 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdglh\" (UniqueName: \"kubernetes.io/projected/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-kube-api-access-hdglh\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.742050 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-logs\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.747636 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-internal-tls-certs\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.748813 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-public-tls-certs\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.752394 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-config-data\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 
crc kubenswrapper[4954]: I1128 16:36:00.757692 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.759701 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdglh\" (UniqueName: \"kubernetes.io/projected/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-kube-api-access-hdglh\") pod \"nova-api-0\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " pod="openstack/nova-api-0" Nov 28 16:36:00 crc kubenswrapper[4954]: I1128 16:36:00.862386 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.296890 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.451518 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5","Type":"ContainerStarted","Data":"1a2263f6f7b183effffaa5a53ff69918d73a88d7567a168ae22f5767d83d11b9"} Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.459289 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"642e0138-17d8-47e0-a67d-51a758291f7e","Type":"ContainerStarted","Data":"a90a636e9b99d45f8039d9676ed4cf0cbaf5b8af82689455191e468cfb83faa4"} Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.459338 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"642e0138-17d8-47e0-a67d-51a758291f7e","Type":"ContainerStarted","Data":"a4137e9f8aff40189511ca5d877f9f9ea4572d1ba1b973060ea1671fe11c5291"} Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.459348 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"642e0138-17d8-47e0-a67d-51a758291f7e","Type":"ContainerStarted","Data":"c8c37ba9170312790571f11b8cb1cfccb49ce1f56757e7a5b453681557696a42"} Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.470673 4954 generic.go:334] "Generic (PLEG): container finished" podID="8eae8ee3-3bd5-4463-b936-a237fbf26d2a" containerID="58fc661e399a38dfc54e41dbaffdaef877ddb21d80fbfae544a83c1987cc0522" exitCode=0 Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.470718 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8eae8ee3-3bd5-4463-b936-a237fbf26d2a","Type":"ContainerDied","Data":"58fc661e399a38dfc54e41dbaffdaef877ddb21d80fbfae544a83c1987cc0522"} Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.488115 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.488095015 podStartE2EDuration="2.488095015s" podCreationTimestamp="2025-11-28 16:35:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:36:01.481395689 +0000 UTC m=+1514.873064220" watchObservedRunningTime="2025-11-28 16:36:01.488095015 +0000 UTC m=+1514.879763556" Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.653136 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.763344 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5dwk\" (UniqueName: \"kubernetes.io/projected/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-kube-api-access-z5dwk\") pod \"8eae8ee3-3bd5-4463-b936-a237fbf26d2a\" (UID: \"8eae8ee3-3bd5-4463-b936-a237fbf26d2a\") " Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.763488 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-combined-ca-bundle\") pod \"8eae8ee3-3bd5-4463-b936-a237fbf26d2a\" (UID: \"8eae8ee3-3bd5-4463-b936-a237fbf26d2a\") " Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.763579 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-config-data\") pod \"8eae8ee3-3bd5-4463-b936-a237fbf26d2a\" (UID: \"8eae8ee3-3bd5-4463-b936-a237fbf26d2a\") " Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.767835 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-kube-api-access-z5dwk" (OuterVolumeSpecName: "kube-api-access-z5dwk") pod "8eae8ee3-3bd5-4463-b936-a237fbf26d2a" (UID: "8eae8ee3-3bd5-4463-b936-a237fbf26d2a"). InnerVolumeSpecName "kube-api-access-z5dwk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.801363 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-config-data" (OuterVolumeSpecName: "config-data") pod "8eae8ee3-3bd5-4463-b936-a237fbf26d2a" (UID: "8eae8ee3-3bd5-4463-b936-a237fbf26d2a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.802741 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8eae8ee3-3bd5-4463-b936-a237fbf26d2a" (UID: "8eae8ee3-3bd5-4463-b936-a237fbf26d2a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.867486 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5dwk\" (UniqueName: \"kubernetes.io/projected/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-kube-api-access-z5dwk\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.867513 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.867536 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8eae8ee3-3bd5-4463-b936-a237fbf26d2a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:01 crc kubenswrapper[4954]: I1128 16:36:01.870676 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08079426-ad38-42d6-a577-b3f6a39fcf7b" path="/var/lib/kubelet/pods/08079426-ad38-42d6-a577-b3f6a39fcf7b/volumes" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.486040 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.486268 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8eae8ee3-3bd5-4463-b936-a237fbf26d2a","Type":"ContainerDied","Data":"dc83373818f1abdd0e1f401aaee8c5b1e34a1e198c9f5ee9a78a8054b0389526"} Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.486310 4954 scope.go:117] "RemoveContainer" containerID="58fc661e399a38dfc54e41dbaffdaef877ddb21d80fbfae544a83c1987cc0522" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.490129 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5","Type":"ContainerStarted","Data":"c22a271e9381f4b47783c1a7935315aa68fd7d56ae23e9e0515f0742f1172519"} Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.490429 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5","Type":"ContainerStarted","Data":"5a2d7806f5c1628a5fc711b36527a7d4ae8db75c650b99cdcc2d5020d8a8b6cd"} Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.517138 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.5171179439999998 podStartE2EDuration="2.517117944s" podCreationTimestamp="2025-11-28 16:36:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:36:02.509130316 +0000 UTC m=+1515.900798857" watchObservedRunningTime="2025-11-28 16:36:02.517117944 +0000 UTC m=+1515.908786485" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.534583 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.545712 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.560709 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:36:02 crc kubenswrapper[4954]: E1128 16:36:02.561221 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8eae8ee3-3bd5-4463-b936-a237fbf26d2a" 
containerName="nova-scheduler-scheduler" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.561238 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="8eae8ee3-3bd5-4463-b936-a237fbf26d2a" containerName="nova-scheduler-scheduler" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.561479 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="8eae8ee3-3bd5-4463-b936-a237fbf26d2a" containerName="nova-scheduler-scheduler" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.562232 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.572361 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.573211 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.682571 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"1f8f01be-65c4-4a56-85e5-1b46ba5804ee\") " pod="openstack/nova-scheduler-0" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.682631 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skklp\" (UniqueName: \"kubernetes.io/projected/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-kube-api-access-skklp\") pod \"nova-scheduler-0\" (UID: \"1f8f01be-65c4-4a56-85e5-1b46ba5804ee\") " pod="openstack/nova-scheduler-0" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.682706 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-config-data\") pod \"nova-scheduler-0\" (UID: \"1f8f01be-65c4-4a56-85e5-1b46ba5804ee\") " pod="openstack/nova-scheduler-0" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.784348 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-config-data\") pod \"nova-scheduler-0\" (UID: \"1f8f01be-65c4-4a56-85e5-1b46ba5804ee\") " pod="openstack/nova-scheduler-0" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.784537 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"1f8f01be-65c4-4a56-85e5-1b46ba5804ee\") " pod="openstack/nova-scheduler-0" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.784582 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skklp\" (UniqueName: \"kubernetes.io/projected/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-kube-api-access-skklp\") pod \"nova-scheduler-0\" (UID: \"1f8f01be-65c4-4a56-85e5-1b46ba5804ee\") " pod="openstack/nova-scheduler-0" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.791963 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-config-data\") pod \"nova-scheduler-0\" (UID: \"1f8f01be-65c4-4a56-85e5-1b46ba5804ee\") " 
pod="openstack/nova-scheduler-0" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.792053 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"1f8f01be-65c4-4a56-85e5-1b46ba5804ee\") " pod="openstack/nova-scheduler-0" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.803941 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skklp\" (UniqueName: \"kubernetes.io/projected/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-kube-api-access-skklp\") pod \"nova-scheduler-0\" (UID: \"1f8f01be-65c4-4a56-85e5-1b46ba5804ee\") " pod="openstack/nova-scheduler-0" Nov 28 16:36:02 crc kubenswrapper[4954]: I1128 16:36:02.882060 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:36:03 crc kubenswrapper[4954]: I1128 16:36:03.370350 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:36:03 crc kubenswrapper[4954]: I1128 16:36:03.505693 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"1f8f01be-65c4-4a56-85e5-1b46ba5804ee","Type":"ContainerStarted","Data":"5e51b63f07aa7be73cc0b5f92d7d06ac103b6c27879ac6f5b3d5ab3d20d8add6"} Nov 28 16:36:03 crc kubenswrapper[4954]: I1128 16:36:03.869559 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8eae8ee3-3bd5-4463-b936-a237fbf26d2a" path="/var/lib/kubelet/pods/8eae8ee3-3bd5-4463-b936-a237fbf26d2a/volumes" Nov 28 16:36:04 crc kubenswrapper[4954]: I1128 16:36:04.515049 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"1f8f01be-65c4-4a56-85e5-1b46ba5804ee","Type":"ContainerStarted","Data":"509442becd9f8224ea575b681dffcd00ba195d6a23b535824e28058798c86086"} Nov 28 16:36:04 crc kubenswrapper[4954]: I1128 16:36:04.536353 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.53633589 podStartE2EDuration="2.53633589s" podCreationTimestamp="2025-11-28 16:36:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:36:04.533784128 +0000 UTC m=+1517.925452669" watchObservedRunningTime="2025-11-28 16:36:04.53633589 +0000 UTC m=+1517.928004431" Nov 28 16:36:05 crc kubenswrapper[4954]: I1128 16:36:05.212348 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:36:05 crc kubenswrapper[4954]: I1128 16:36:05.212505 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:36:05 crc kubenswrapper[4954]: I1128 16:36:05.364446 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 16:36:06 crc kubenswrapper[4954]: I1128 16:36:06.784180 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-s8r46"] Nov 28 16:36:06 crc kubenswrapper[4954]: I1128 16:36:06.787630 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:06 crc kubenswrapper[4954]: I1128 16:36:06.796310 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s8r46"] Nov 28 16:36:06 crc kubenswrapper[4954]: I1128 16:36:06.885257 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a5e3976-56be-44b7-82c4-7137dd0478f4-catalog-content\") pod \"community-operators-s8r46\" (UID: \"5a5e3976-56be-44b7-82c4-7137dd0478f4\") " pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:06 crc kubenswrapper[4954]: I1128 16:36:06.885382 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a5e3976-56be-44b7-82c4-7137dd0478f4-utilities\") pod \"community-operators-s8r46\" (UID: \"5a5e3976-56be-44b7-82c4-7137dd0478f4\") " pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:06 crc kubenswrapper[4954]: I1128 16:36:06.885461 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rngzm\" (UniqueName: \"kubernetes.io/projected/5a5e3976-56be-44b7-82c4-7137dd0478f4-kube-api-access-rngzm\") pod \"community-operators-s8r46\" (UID: \"5a5e3976-56be-44b7-82c4-7137dd0478f4\") " pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:06 crc kubenswrapper[4954]: I1128 16:36:06.987624 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a5e3976-56be-44b7-82c4-7137dd0478f4-catalog-content\") pod \"community-operators-s8r46\" (UID: \"5a5e3976-56be-44b7-82c4-7137dd0478f4\") " pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:06 crc kubenswrapper[4954]: I1128 16:36:06.987943 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a5e3976-56be-44b7-82c4-7137dd0478f4-utilities\") pod \"community-operators-s8r46\" (UID: \"5a5e3976-56be-44b7-82c4-7137dd0478f4\") " pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:06 crc kubenswrapper[4954]: I1128 16:36:06.988020 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rngzm\" (UniqueName: \"kubernetes.io/projected/5a5e3976-56be-44b7-82c4-7137dd0478f4-kube-api-access-rngzm\") pod \"community-operators-s8r46\" (UID: \"5a5e3976-56be-44b7-82c4-7137dd0478f4\") " pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:06 crc kubenswrapper[4954]: I1128 16:36:06.988142 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a5e3976-56be-44b7-82c4-7137dd0478f4-catalog-content\") pod \"community-operators-s8r46\" (UID: \"5a5e3976-56be-44b7-82c4-7137dd0478f4\") " pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:06 crc kubenswrapper[4954]: I1128 16:36:06.988389 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a5e3976-56be-44b7-82c4-7137dd0478f4-utilities\") pod \"community-operators-s8r46\" (UID: \"5a5e3976-56be-44b7-82c4-7137dd0478f4\") " pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:07 crc kubenswrapper[4954]: I1128 16:36:07.009747 4954 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rngzm\" (UniqueName: \"kubernetes.io/projected/5a5e3976-56be-44b7-82c4-7137dd0478f4-kube-api-access-rngzm\") pod \"community-operators-s8r46\" (UID: \"5a5e3976-56be-44b7-82c4-7137dd0478f4\") " pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:07 crc kubenswrapper[4954]: I1128 16:36:07.126948 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:07 crc kubenswrapper[4954]: I1128 16:36:07.708900 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s8r46"] Nov 28 16:36:07 crc kubenswrapper[4954]: I1128 16:36:07.883040 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 16:36:08 crc kubenswrapper[4954]: I1128 16:36:08.560387 4954 generic.go:334] "Generic (PLEG): container finished" podID="5a5e3976-56be-44b7-82c4-7137dd0478f4" containerID="83f5fa93fc798e117eed2cd37a49d6f3c93b23ab3996476669669bfd00d47418" exitCode=0 Nov 28 16:36:08 crc kubenswrapper[4954]: I1128 16:36:08.560459 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8r46" event={"ID":"5a5e3976-56be-44b7-82c4-7137dd0478f4","Type":"ContainerDied","Data":"83f5fa93fc798e117eed2cd37a49d6f3c93b23ab3996476669669bfd00d47418"} Nov 28 16:36:08 crc kubenswrapper[4954]: I1128 16:36:08.560696 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8r46" event={"ID":"5a5e3976-56be-44b7-82c4-7137dd0478f4","Type":"ContainerStarted","Data":"46ec4127fdf6d56848172d51f9ed3e6246d15c0b64e634dcc78a2ba2614a149e"} Nov 28 16:36:09 crc kubenswrapper[4954]: I1128 16:36:09.578198 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8r46" event={"ID":"5a5e3976-56be-44b7-82c4-7137dd0478f4","Type":"ContainerStarted","Data":"d76122da848476606fb2187c164e65e497ba5dd3ffd7719e30842eb43437471d"} Nov 28 16:36:10 crc kubenswrapper[4954]: I1128 16:36:10.212925 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 16:36:10 crc kubenswrapper[4954]: I1128 16:36:10.213005 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 16:36:10 crc kubenswrapper[4954]: I1128 16:36:10.590028 4954 generic.go:334] "Generic (PLEG): container finished" podID="5a5e3976-56be-44b7-82c4-7137dd0478f4" containerID="d76122da848476606fb2187c164e65e497ba5dd3ffd7719e30842eb43437471d" exitCode=0 Nov 28 16:36:10 crc kubenswrapper[4954]: I1128 16:36:10.590149 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8r46" event={"ID":"5a5e3976-56be-44b7-82c4-7137dd0478f4","Type":"ContainerDied","Data":"d76122da848476606fb2187c164e65e497ba5dd3ffd7719e30842eb43437471d"} Nov 28 16:36:10 crc kubenswrapper[4954]: I1128 16:36:10.863664 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:36:10 crc kubenswrapper[4954]: I1128 16:36:10.863796 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:36:11 crc kubenswrapper[4954]: I1128 16:36:11.261883 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="642e0138-17d8-47e0-a67d-51a758291f7e" containerName="nova-metadata-log" 
probeResult="failure" output="Get \"https://10.217.0.202:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:36:11 crc kubenswrapper[4954]: I1128 16:36:11.261909 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="642e0138-17d8-47e0-a67d-51a758291f7e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.202:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:36:11 crc kubenswrapper[4954]: I1128 16:36:11.604389 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8r46" event={"ID":"5a5e3976-56be-44b7-82c4-7137dd0478f4","Type":"ContainerStarted","Data":"23a036e9d75eb9cfc1dd8a3b32031a2d37ae3662602ee7c8b41ce4a201857b38"} Nov 28 16:36:11 crc kubenswrapper[4954]: I1128 16:36:11.623157 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-s8r46" podStartSLOduration=3.068621076 podStartE2EDuration="5.623136791s" podCreationTimestamp="2025-11-28 16:36:06 +0000 UTC" firstStartedPulling="2025-11-28 16:36:08.562195846 +0000 UTC m=+1521.953864387" lastFinishedPulling="2025-11-28 16:36:11.116711561 +0000 UTC m=+1524.508380102" observedRunningTime="2025-11-28 16:36:11.621699784 +0000 UTC m=+1525.013368335" watchObservedRunningTime="2025-11-28 16:36:11.623136791 +0000 UTC m=+1525.014805332" Nov 28 16:36:11 crc kubenswrapper[4954]: I1128 16:36:11.884797 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.203:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:36:11 crc kubenswrapper[4954]: I1128 16:36:11.885172 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.203:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 16:36:12 crc kubenswrapper[4954]: I1128 16:36:12.882566 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 16:36:12 crc kubenswrapper[4954]: I1128 16:36:12.932022 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 16:36:13 crc kubenswrapper[4954]: I1128 16:36:13.663579 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 16:36:17 crc kubenswrapper[4954]: I1128 16:36:17.127950 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:17 crc kubenswrapper[4954]: I1128 16:36:17.132772 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:17 crc kubenswrapper[4954]: I1128 16:36:17.189981 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:17 crc kubenswrapper[4954]: I1128 16:36:17.728761 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:17 crc kubenswrapper[4954]: I1128 16:36:17.778029 4954 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s8r46"] Nov 28 16:36:19 crc kubenswrapper[4954]: I1128 16:36:19.680786 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-s8r46" podUID="5a5e3976-56be-44b7-82c4-7137dd0478f4" containerName="registry-server" containerID="cri-o://23a036e9d75eb9cfc1dd8a3b32031a2d37ae3662602ee7c8b41ce4a201857b38" gracePeriod=2 Nov 28 16:36:19 crc kubenswrapper[4954]: I1128 16:36:19.846712 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6bpmk"] Nov 28 16:36:19 crc kubenswrapper[4954]: I1128 16:36:19.849149 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:19 crc kubenswrapper[4954]: I1128 16:36:19.867630 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6bpmk"] Nov 28 16:36:19 crc kubenswrapper[4954]: I1128 16:36:19.945212 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd07b145-f58b-48bc-b8cf-430b14a6c842-catalog-content\") pod \"certified-operators-6bpmk\" (UID: \"dd07b145-f58b-48bc-b8cf-430b14a6c842\") " pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:19 crc kubenswrapper[4954]: I1128 16:36:19.945454 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd07b145-f58b-48bc-b8cf-430b14a6c842-utilities\") pod \"certified-operators-6bpmk\" (UID: \"dd07b145-f58b-48bc-b8cf-430b14a6c842\") " pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:19 crc kubenswrapper[4954]: I1128 16:36:19.945504 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7xfr\" (UniqueName: \"kubernetes.io/projected/dd07b145-f58b-48bc-b8cf-430b14a6c842-kube-api-access-z7xfr\") pod \"certified-operators-6bpmk\" (UID: \"dd07b145-f58b-48bc-b8cf-430b14a6c842\") " pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.047627 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd07b145-f58b-48bc-b8cf-430b14a6c842-utilities\") pod \"certified-operators-6bpmk\" (UID: \"dd07b145-f58b-48bc-b8cf-430b14a6c842\") " pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.047960 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7xfr\" (UniqueName: \"kubernetes.io/projected/dd07b145-f58b-48bc-b8cf-430b14a6c842-kube-api-access-z7xfr\") pod \"certified-operators-6bpmk\" (UID: \"dd07b145-f58b-48bc-b8cf-430b14a6c842\") " pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.048004 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd07b145-f58b-48bc-b8cf-430b14a6c842-catalog-content\") pod \"certified-operators-6bpmk\" (UID: \"dd07b145-f58b-48bc-b8cf-430b14a6c842\") " pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.048240 4954 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd07b145-f58b-48bc-b8cf-430b14a6c842-utilities\") pod \"certified-operators-6bpmk\" (UID: \"dd07b145-f58b-48bc-b8cf-430b14a6c842\") " pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.048436 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd07b145-f58b-48bc-b8cf-430b14a6c842-catalog-content\") pod \"certified-operators-6bpmk\" (UID: \"dd07b145-f58b-48bc-b8cf-430b14a6c842\") " pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.080567 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7xfr\" (UniqueName: \"kubernetes.io/projected/dd07b145-f58b-48bc-b8cf-430b14a6c842-kube-api-access-z7xfr\") pod \"certified-operators-6bpmk\" (UID: \"dd07b145-f58b-48bc-b8cf-430b14a6c842\") " pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.212276 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.218157 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.230883 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.243968 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.247041 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.354112 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rngzm\" (UniqueName: \"kubernetes.io/projected/5a5e3976-56be-44b7-82c4-7137dd0478f4-kube-api-access-rngzm\") pod \"5a5e3976-56be-44b7-82c4-7137dd0478f4\" (UID: \"5a5e3976-56be-44b7-82c4-7137dd0478f4\") " Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.354245 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a5e3976-56be-44b7-82c4-7137dd0478f4-catalog-content\") pod \"5a5e3976-56be-44b7-82c4-7137dd0478f4\" (UID: \"5a5e3976-56be-44b7-82c4-7137dd0478f4\") " Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.354347 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a5e3976-56be-44b7-82c4-7137dd0478f4-utilities\") pod \"5a5e3976-56be-44b7-82c4-7137dd0478f4\" (UID: \"5a5e3976-56be-44b7-82c4-7137dd0478f4\") " Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.357717 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a5e3976-56be-44b7-82c4-7137dd0478f4-utilities" (OuterVolumeSpecName: "utilities") pod "5a5e3976-56be-44b7-82c4-7137dd0478f4" (UID: "5a5e3976-56be-44b7-82c4-7137dd0478f4"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.371737 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a5e3976-56be-44b7-82c4-7137dd0478f4-kube-api-access-rngzm" (OuterVolumeSpecName: "kube-api-access-rngzm") pod "5a5e3976-56be-44b7-82c4-7137dd0478f4" (UID: "5a5e3976-56be-44b7-82c4-7137dd0478f4"). InnerVolumeSpecName "kube-api-access-rngzm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.435655 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a5e3976-56be-44b7-82c4-7137dd0478f4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5a5e3976-56be-44b7-82c4-7137dd0478f4" (UID: "5a5e3976-56be-44b7-82c4-7137dd0478f4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.458572 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rngzm\" (UniqueName: \"kubernetes.io/projected/5a5e3976-56be-44b7-82c4-7137dd0478f4-kube-api-access-rngzm\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.458605 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a5e3976-56be-44b7-82c4-7137dd0478f4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.458621 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a5e3976-56be-44b7-82c4-7137dd0478f4-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.690716 4954 generic.go:334] "Generic (PLEG): container finished" podID="5a5e3976-56be-44b7-82c4-7137dd0478f4" containerID="23a036e9d75eb9cfc1dd8a3b32031a2d37ae3662602ee7c8b41ce4a201857b38" exitCode=0 Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.690852 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-s8r46" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.690920 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8r46" event={"ID":"5a5e3976-56be-44b7-82c4-7137dd0478f4","Type":"ContainerDied","Data":"23a036e9d75eb9cfc1dd8a3b32031a2d37ae3662602ee7c8b41ce4a201857b38"} Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.690969 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s8r46" event={"ID":"5a5e3976-56be-44b7-82c4-7137dd0478f4","Type":"ContainerDied","Data":"46ec4127fdf6d56848172d51f9ed3e6246d15c0b64e634dcc78a2ba2614a149e"} Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.690993 4954 scope.go:117] "RemoveContainer" containerID="23a036e9d75eb9cfc1dd8a3b32031a2d37ae3662602ee7c8b41ce4a201857b38" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.696619 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.720370 4954 scope.go:117] "RemoveContainer" containerID="d76122da848476606fb2187c164e65e497ba5dd3ffd7719e30842eb43437471d" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.748869 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s8r46"] Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.760344 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-s8r46"] Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.778806 4954 scope.go:117] "RemoveContainer" containerID="83f5fa93fc798e117eed2cd37a49d6f3c93b23ab3996476669669bfd00d47418" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.779116 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6bpmk"] Nov 28 16:36:20 crc kubenswrapper[4954]: W1128 16:36:20.792059 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd07b145_f58b_48bc_b8cf_430b14a6c842.slice/crio-27af9c6c8b8a0a619973f73de9f8ccc756d92fd43090c916b7882748813e8b2a WatchSource:0}: Error finding container 27af9c6c8b8a0a619973f73de9f8ccc756d92fd43090c916b7882748813e8b2a: Status 404 returned error can't find the container with id 27af9c6c8b8a0a619973f73de9f8ccc756d92fd43090c916b7882748813e8b2a Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.810728 4954 scope.go:117] "RemoveContainer" containerID="23a036e9d75eb9cfc1dd8a3b32031a2d37ae3662602ee7c8b41ce4a201857b38" Nov 28 16:36:20 crc kubenswrapper[4954]: E1128 16:36:20.818552 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23a036e9d75eb9cfc1dd8a3b32031a2d37ae3662602ee7c8b41ce4a201857b38\": container with ID starting with 23a036e9d75eb9cfc1dd8a3b32031a2d37ae3662602ee7c8b41ce4a201857b38 not found: ID does not exist" containerID="23a036e9d75eb9cfc1dd8a3b32031a2d37ae3662602ee7c8b41ce4a201857b38" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.818607 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23a036e9d75eb9cfc1dd8a3b32031a2d37ae3662602ee7c8b41ce4a201857b38"} err="failed to get container status \"23a036e9d75eb9cfc1dd8a3b32031a2d37ae3662602ee7c8b41ce4a201857b38\": rpc error: code = NotFound desc = could not find container 
\"23a036e9d75eb9cfc1dd8a3b32031a2d37ae3662602ee7c8b41ce4a201857b38\": container with ID starting with 23a036e9d75eb9cfc1dd8a3b32031a2d37ae3662602ee7c8b41ce4a201857b38 not found: ID does not exist" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.818638 4954 scope.go:117] "RemoveContainer" containerID="d76122da848476606fb2187c164e65e497ba5dd3ffd7719e30842eb43437471d" Nov 28 16:36:20 crc kubenswrapper[4954]: E1128 16:36:20.831820 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d76122da848476606fb2187c164e65e497ba5dd3ffd7719e30842eb43437471d\": container with ID starting with d76122da848476606fb2187c164e65e497ba5dd3ffd7719e30842eb43437471d not found: ID does not exist" containerID="d76122da848476606fb2187c164e65e497ba5dd3ffd7719e30842eb43437471d" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.831881 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d76122da848476606fb2187c164e65e497ba5dd3ffd7719e30842eb43437471d"} err="failed to get container status \"d76122da848476606fb2187c164e65e497ba5dd3ffd7719e30842eb43437471d\": rpc error: code = NotFound desc = could not find container \"d76122da848476606fb2187c164e65e497ba5dd3ffd7719e30842eb43437471d\": container with ID starting with d76122da848476606fb2187c164e65e497ba5dd3ffd7719e30842eb43437471d not found: ID does not exist" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.831917 4954 scope.go:117] "RemoveContainer" containerID="83f5fa93fc798e117eed2cd37a49d6f3c93b23ab3996476669669bfd00d47418" Nov 28 16:36:20 crc kubenswrapper[4954]: E1128 16:36:20.840271 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83f5fa93fc798e117eed2cd37a49d6f3c93b23ab3996476669669bfd00d47418\": container with ID starting with 83f5fa93fc798e117eed2cd37a49d6f3c93b23ab3996476669669bfd00d47418 not found: ID does not exist" containerID="83f5fa93fc798e117eed2cd37a49d6f3c93b23ab3996476669669bfd00d47418" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.840337 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83f5fa93fc798e117eed2cd37a49d6f3c93b23ab3996476669669bfd00d47418"} err="failed to get container status \"83f5fa93fc798e117eed2cd37a49d6f3c93b23ab3996476669669bfd00d47418\": rpc error: code = NotFound desc = could not find container \"83f5fa93fc798e117eed2cd37a49d6f3c93b23ab3996476669669bfd00d47418\": container with ID starting with 83f5fa93fc798e117eed2cd37a49d6f3c93b23ab3996476669669bfd00d47418 not found: ID does not exist" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.871619 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.872001 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.872115 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 16:36:20 crc kubenswrapper[4954]: I1128 16:36:20.879401 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 16:36:21 crc kubenswrapper[4954]: I1128 16:36:21.701752 4954 generic.go:334] "Generic (PLEG): container finished" podID="dd07b145-f58b-48bc-b8cf-430b14a6c842" 
containerID="c91d6feecbf66ab1b7d14745c0d0c73e94c0b63bef554387e3d5c0d5bcc4a298" exitCode=0 Nov 28 16:36:21 crc kubenswrapper[4954]: I1128 16:36:21.701847 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bpmk" event={"ID":"dd07b145-f58b-48bc-b8cf-430b14a6c842","Type":"ContainerDied","Data":"c91d6feecbf66ab1b7d14745c0d0c73e94c0b63bef554387e3d5c0d5bcc4a298"} Nov 28 16:36:21 crc kubenswrapper[4954]: I1128 16:36:21.701899 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bpmk" event={"ID":"dd07b145-f58b-48bc-b8cf-430b14a6c842","Type":"ContainerStarted","Data":"27af9c6c8b8a0a619973f73de9f8ccc756d92fd43090c916b7882748813e8b2a"} Nov 28 16:36:21 crc kubenswrapper[4954]: I1128 16:36:21.702861 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 16:36:21 crc kubenswrapper[4954]: I1128 16:36:21.710450 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 16:36:21 crc kubenswrapper[4954]: I1128 16:36:21.867784 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a5e3976-56be-44b7-82c4-7137dd0478f4" path="/var/lib/kubelet/pods/5a5e3976-56be-44b7-82c4-7137dd0478f4/volumes" Nov 28 16:36:23 crc kubenswrapper[4954]: I1128 16:36:23.728404 4954 generic.go:334] "Generic (PLEG): container finished" podID="dd07b145-f58b-48bc-b8cf-430b14a6c842" containerID="d4248ae82f12158f353cbc53c606789901c3c23e40cec132fb5541c779dd756d" exitCode=0 Nov 28 16:36:23 crc kubenswrapper[4954]: I1128 16:36:23.728465 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bpmk" event={"ID":"dd07b145-f58b-48bc-b8cf-430b14a6c842","Type":"ContainerDied","Data":"d4248ae82f12158f353cbc53c606789901c3c23e40cec132fb5541c779dd756d"} Nov 28 16:36:24 crc kubenswrapper[4954]: I1128 16:36:24.743734 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bpmk" event={"ID":"dd07b145-f58b-48bc-b8cf-430b14a6c842","Type":"ContainerStarted","Data":"993ff4966f73397f5ab5a4e7ccefc73ef1be860e8e34c583dbf4276da9ef635c"} Nov 28 16:36:24 crc kubenswrapper[4954]: I1128 16:36:24.771800 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6bpmk" podStartSLOduration=3.168900891 podStartE2EDuration="5.771781146s" podCreationTimestamp="2025-11-28 16:36:19 +0000 UTC" firstStartedPulling="2025-11-28 16:36:21.704154326 +0000 UTC m=+1535.095822867" lastFinishedPulling="2025-11-28 16:36:24.307034561 +0000 UTC m=+1537.698703122" observedRunningTime="2025-11-28 16:36:24.762343422 +0000 UTC m=+1538.154011993" watchObservedRunningTime="2025-11-28 16:36:24.771781146 +0000 UTC m=+1538.163449687" Nov 28 16:36:30 crc kubenswrapper[4954]: I1128 16:36:30.212488 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:30 crc kubenswrapper[4954]: I1128 16:36:30.213157 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:30 crc kubenswrapper[4954]: I1128 16:36:30.261084 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:30 crc kubenswrapper[4954]: I1128 16:36:30.847772 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:30 crc kubenswrapper[4954]: I1128 16:36:30.901670 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6bpmk"] Nov 28 16:36:32 crc kubenswrapper[4954]: I1128 16:36:32.480857 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:36:32 crc kubenswrapper[4954]: I1128 16:36:32.480920 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:36:32 crc kubenswrapper[4954]: I1128 16:36:32.821102 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6bpmk" podUID="dd07b145-f58b-48bc-b8cf-430b14a6c842" containerName="registry-server" containerID="cri-o://993ff4966f73397f5ab5a4e7ccefc73ef1be860e8e34c583dbf4276da9ef635c" gracePeriod=2 Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.307587 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.420821 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd07b145-f58b-48bc-b8cf-430b14a6c842-catalog-content\") pod \"dd07b145-f58b-48bc-b8cf-430b14a6c842\" (UID: \"dd07b145-f58b-48bc-b8cf-430b14a6c842\") " Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.421008 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7xfr\" (UniqueName: \"kubernetes.io/projected/dd07b145-f58b-48bc-b8cf-430b14a6c842-kube-api-access-z7xfr\") pod \"dd07b145-f58b-48bc-b8cf-430b14a6c842\" (UID: \"dd07b145-f58b-48bc-b8cf-430b14a6c842\") " Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.421082 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd07b145-f58b-48bc-b8cf-430b14a6c842-utilities\") pod \"dd07b145-f58b-48bc-b8cf-430b14a6c842\" (UID: \"dd07b145-f58b-48bc-b8cf-430b14a6c842\") " Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.422415 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd07b145-f58b-48bc-b8cf-430b14a6c842-utilities" (OuterVolumeSpecName: "utilities") pod "dd07b145-f58b-48bc-b8cf-430b14a6c842" (UID: "dd07b145-f58b-48bc-b8cf-430b14a6c842"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.427633 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd07b145-f58b-48bc-b8cf-430b14a6c842-kube-api-access-z7xfr" (OuterVolumeSpecName: "kube-api-access-z7xfr") pod "dd07b145-f58b-48bc-b8cf-430b14a6c842" (UID: "dd07b145-f58b-48bc-b8cf-430b14a6c842"). InnerVolumeSpecName "kube-api-access-z7xfr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.474698 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd07b145-f58b-48bc-b8cf-430b14a6c842-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dd07b145-f58b-48bc-b8cf-430b14a6c842" (UID: "dd07b145-f58b-48bc-b8cf-430b14a6c842"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.525229 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd07b145-f58b-48bc-b8cf-430b14a6c842-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.525274 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7xfr\" (UniqueName: \"kubernetes.io/projected/dd07b145-f58b-48bc-b8cf-430b14a6c842-kube-api-access-z7xfr\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.525289 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd07b145-f58b-48bc-b8cf-430b14a6c842-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.833071 4954 generic.go:334] "Generic (PLEG): container finished" podID="dd07b145-f58b-48bc-b8cf-430b14a6c842" containerID="993ff4966f73397f5ab5a4e7ccefc73ef1be860e8e34c583dbf4276da9ef635c" exitCode=0 Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.833110 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bpmk" event={"ID":"dd07b145-f58b-48bc-b8cf-430b14a6c842","Type":"ContainerDied","Data":"993ff4966f73397f5ab5a4e7ccefc73ef1be860e8e34c583dbf4276da9ef635c"} Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.833136 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bpmk" event={"ID":"dd07b145-f58b-48bc-b8cf-430b14a6c842","Type":"ContainerDied","Data":"27af9c6c8b8a0a619973f73de9f8ccc756d92fd43090c916b7882748813e8b2a"} Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.833153 4954 scope.go:117] "RemoveContainer" containerID="993ff4966f73397f5ab5a4e7ccefc73ef1be860e8e34c583dbf4276da9ef635c" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.833276 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6bpmk" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.862005 4954 scope.go:117] "RemoveContainer" containerID="d4248ae82f12158f353cbc53c606789901c3c23e40cec132fb5541c779dd756d" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.878478 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6bpmk"] Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.888498 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6bpmk"] Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.907351 4954 scope.go:117] "RemoveContainer" containerID="c91d6feecbf66ab1b7d14745c0d0c73e94c0b63bef554387e3d5c0d5bcc4a298" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.943725 4954 scope.go:117] "RemoveContainer" containerID="993ff4966f73397f5ab5a4e7ccefc73ef1be860e8e34c583dbf4276da9ef635c" Nov 28 16:36:33 crc kubenswrapper[4954]: E1128 16:36:33.944188 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"993ff4966f73397f5ab5a4e7ccefc73ef1be860e8e34c583dbf4276da9ef635c\": container with ID starting with 993ff4966f73397f5ab5a4e7ccefc73ef1be860e8e34c583dbf4276da9ef635c not found: ID does not exist" containerID="993ff4966f73397f5ab5a4e7ccefc73ef1be860e8e34c583dbf4276da9ef635c" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.944230 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"993ff4966f73397f5ab5a4e7ccefc73ef1be860e8e34c583dbf4276da9ef635c"} err="failed to get container status \"993ff4966f73397f5ab5a4e7ccefc73ef1be860e8e34c583dbf4276da9ef635c\": rpc error: code = NotFound desc = could not find container \"993ff4966f73397f5ab5a4e7ccefc73ef1be860e8e34c583dbf4276da9ef635c\": container with ID starting with 993ff4966f73397f5ab5a4e7ccefc73ef1be860e8e34c583dbf4276da9ef635c not found: ID does not exist" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.944258 4954 scope.go:117] "RemoveContainer" containerID="d4248ae82f12158f353cbc53c606789901c3c23e40cec132fb5541c779dd756d" Nov 28 16:36:33 crc kubenswrapper[4954]: E1128 16:36:33.944760 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4248ae82f12158f353cbc53c606789901c3c23e40cec132fb5541c779dd756d\": container with ID starting with d4248ae82f12158f353cbc53c606789901c3c23e40cec132fb5541c779dd756d not found: ID does not exist" containerID="d4248ae82f12158f353cbc53c606789901c3c23e40cec132fb5541c779dd756d" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.944798 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4248ae82f12158f353cbc53c606789901c3c23e40cec132fb5541c779dd756d"} err="failed to get container status \"d4248ae82f12158f353cbc53c606789901c3c23e40cec132fb5541c779dd756d\": rpc error: code = NotFound desc = could not find container \"d4248ae82f12158f353cbc53c606789901c3c23e40cec132fb5541c779dd756d\": container with ID starting with d4248ae82f12158f353cbc53c606789901c3c23e40cec132fb5541c779dd756d not found: ID does not exist" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.944820 4954 scope.go:117] "RemoveContainer" containerID="c91d6feecbf66ab1b7d14745c0d0c73e94c0b63bef554387e3d5c0d5bcc4a298" Nov 28 16:36:33 crc kubenswrapper[4954]: E1128 16:36:33.945166 4954 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c91d6feecbf66ab1b7d14745c0d0c73e94c0b63bef554387e3d5c0d5bcc4a298\": container with ID starting with c91d6feecbf66ab1b7d14745c0d0c73e94c0b63bef554387e3d5c0d5bcc4a298 not found: ID does not exist" containerID="c91d6feecbf66ab1b7d14745c0d0c73e94c0b63bef554387e3d5c0d5bcc4a298" Nov 28 16:36:33 crc kubenswrapper[4954]: I1128 16:36:33.945202 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c91d6feecbf66ab1b7d14745c0d0c73e94c0b63bef554387e3d5c0d5bcc4a298"} err="failed to get container status \"c91d6feecbf66ab1b7d14745c0d0c73e94c0b63bef554387e3d5c0d5bcc4a298\": rpc error: code = NotFound desc = could not find container \"c91d6feecbf66ab1b7d14745c0d0c73e94c0b63bef554387e3d5c0d5bcc4a298\": container with ID starting with c91d6feecbf66ab1b7d14745c0d0c73e94c0b63bef554387e3d5c0d5bcc4a298 not found: ID does not exist" Nov 28 16:36:35 crc kubenswrapper[4954]: I1128 16:36:35.868312 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd07b145-f58b-48bc-b8cf-430b14a6c842" path="/var/lib/kubelet/pods/dd07b145-f58b-48bc-b8cf-430b14a6c842/volumes" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.421284 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.422177 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="082f7647-b97a-4d57-83b1-ae69d61827b1" containerName="openstackclient" containerID="cri-o://8f3b70dbbdb6e3dcdff478850bd55b90a960a4d6e61d20eb2e977c9634518dce" gracePeriod=2 Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.450707 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.584926 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.747893 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glanceb261-account-delete-s574s"] Nov 28 16:36:41 crc kubenswrapper[4954]: E1128 16:36:41.748323 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd07b145-f58b-48bc-b8cf-430b14a6c842" containerName="extract-utilities" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.748341 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd07b145-f58b-48bc-b8cf-430b14a6c842" containerName="extract-utilities" Nov 28 16:36:41 crc kubenswrapper[4954]: E1128 16:36:41.748352 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a5e3976-56be-44b7-82c4-7137dd0478f4" containerName="extract-utilities" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.748359 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a5e3976-56be-44b7-82c4-7137dd0478f4" containerName="extract-utilities" Nov 28 16:36:41 crc kubenswrapper[4954]: E1128 16:36:41.748377 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd07b145-f58b-48bc-b8cf-430b14a6c842" containerName="extract-content" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.748384 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd07b145-f58b-48bc-b8cf-430b14a6c842" containerName="extract-content" Nov 28 16:36:41 crc kubenswrapper[4954]: E1128 16:36:41.748397 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd07b145-f58b-48bc-b8cf-430b14a6c842" 
containerName="registry-server" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.748404 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd07b145-f58b-48bc-b8cf-430b14a6c842" containerName="registry-server" Nov 28 16:36:41 crc kubenswrapper[4954]: E1128 16:36:41.748416 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a5e3976-56be-44b7-82c4-7137dd0478f4" containerName="registry-server" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.748422 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a5e3976-56be-44b7-82c4-7137dd0478f4" containerName="registry-server" Nov 28 16:36:41 crc kubenswrapper[4954]: E1128 16:36:41.748443 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="082f7647-b97a-4d57-83b1-ae69d61827b1" containerName="openstackclient" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.748448 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="082f7647-b97a-4d57-83b1-ae69d61827b1" containerName="openstackclient" Nov 28 16:36:41 crc kubenswrapper[4954]: E1128 16:36:41.748454 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a5e3976-56be-44b7-82c4-7137dd0478f4" containerName="extract-content" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.748460 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a5e3976-56be-44b7-82c4-7137dd0478f4" containerName="extract-content" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.748681 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd07b145-f58b-48bc-b8cf-430b14a6c842" containerName="registry-server" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.748697 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="082f7647-b97a-4d57-83b1-ae69d61827b1" containerName="openstackclient" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.748709 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a5e3976-56be-44b7-82c4-7137dd0478f4" containerName="registry-server" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.749358 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glanceb261-account-delete-s574s" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.767410 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glanceb261-account-delete-s574s"] Nov 28 16:36:41 crc kubenswrapper[4954]: E1128 16:36:41.784750 4954 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:41 crc kubenswrapper[4954]: E1128 16:36:41.784847 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data podName:ca81ef12-eb13-468e-81fc-0fdf6aba8830 nodeName:}" failed. No retries permitted until 2025-11-28 16:36:42.284826811 +0000 UTC m=+1555.676495352 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data") pod "rabbitmq-cell1-server-0" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830") : configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.892166 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/acf16584-ed76-41dc-955b-17e86a277627-operator-scripts\") pod \"glanceb261-account-delete-s574s\" (UID: \"acf16584-ed76-41dc-955b-17e86a277627\") " pod="openstack/glanceb261-account-delete-s574s" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.892313 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vsjn\" (UniqueName: \"kubernetes.io/projected/acf16584-ed76-41dc-955b-17e86a277627-kube-api-access-2vsjn\") pod \"glanceb261-account-delete-s574s\" (UID: \"acf16584-ed76-41dc-955b-17e86a277627\") " pod="openstack/glanceb261-account-delete-s574s" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.926996 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement4c07-account-delete-52zll"] Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.928332 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement4c07-account-delete-52zll" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.966594 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement4c07-account-delete-52zll"] Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.986613 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.987051 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="8bff6e67-d9f4-4952-992d-1fa362d23a5c" containerName="ovn-northd" containerID="cri-o://a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b" gracePeriod=30 Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.987631 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="8bff6e67-d9f4-4952-992d-1fa362d23a5c" containerName="openstack-network-exporter" containerID="cri-o://022212ff501e5715e891c5ad172a0810fa883b76595a765f3609b5a7f03c28b6" gracePeriod=30 Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.995641 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vsjn\" (UniqueName: \"kubernetes.io/projected/acf16584-ed76-41dc-955b-17e86a277627-kube-api-access-2vsjn\") pod \"glanceb261-account-delete-s574s\" (UID: \"acf16584-ed76-41dc-955b-17e86a277627\") " pod="openstack/glanceb261-account-delete-s574s" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.995907 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/acf16584-ed76-41dc-955b-17e86a277627-operator-scripts\") pod \"glanceb261-account-delete-s574s\" (UID: \"acf16584-ed76-41dc-955b-17e86a277627\") " pod="openstack/glanceb261-account-delete-s574s" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.998122 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.998724 4954 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/acf16584-ed76-41dc-955b-17e86a277627-operator-scripts\") pod \"glanceb261-account-delete-s574s\" (UID: \"acf16584-ed76-41dc-955b-17e86a277627\") " pod="openstack/glanceb261-account-delete-s574s" Nov 28 16:36:41 crc kubenswrapper[4954]: I1128 16:36:41.999459 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="892117aa-d306-4a1d-bf6e-b203b6337537" containerName="openstack-network-exporter" containerID="cri-o://0bc97c121bcab5b752053d9d791ed5b026bc09edb0e37b0c2ab456c187dcfed8" gracePeriod=300 Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.089550 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vsjn\" (UniqueName: \"kubernetes.io/projected/acf16584-ed76-41dc-955b-17e86a277627-kube-api-access-2vsjn\") pod \"glanceb261-account-delete-s574s\" (UID: \"acf16584-ed76-41dc-955b-17e86a277627\") " pod="openstack/glanceb261-account-delete-s574s" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.112421 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b7b0248-e1e6-40f5-9edb-0dbece5f12ae-operator-scripts\") pod \"placement4c07-account-delete-52zll\" (UID: \"1b7b0248-e1e6-40f5-9edb-0dbece5f12ae\") " pod="openstack/placement4c07-account-delete-52zll" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.112836 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjstg\" (UniqueName: \"kubernetes.io/projected/1b7b0248-e1e6-40f5-9edb-0dbece5f12ae-kube-api-access-cjstg\") pod \"placement4c07-account-delete-52zll\" (UID: \"1b7b0248-e1e6-40f5-9edb-0dbece5f12ae\") " pod="openstack/placement4c07-account-delete-52zll" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.188654 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinderd6b0-account-delete-plvlb"] Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.190265 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinderd6b0-account-delete-plvlb" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.204373 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="892117aa-d306-4a1d-bf6e-b203b6337537" containerName="ovsdbserver-nb" containerID="cri-o://107051ce6919142522558805719fa4abac53f52c0b68b1e403a6ef8355295c53" gracePeriod=300 Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.218623 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b7b0248-e1e6-40f5-9edb-0dbece5f12ae-operator-scripts\") pod \"placement4c07-account-delete-52zll\" (UID: \"1b7b0248-e1e6-40f5-9edb-0dbece5f12ae\") " pod="openstack/placement4c07-account-delete-52zll" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.218814 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjstg\" (UniqueName: \"kubernetes.io/projected/1b7b0248-e1e6-40f5-9edb-0dbece5f12ae-kube-api-access-cjstg\") pod \"placement4c07-account-delete-52zll\" (UID: \"1b7b0248-e1e6-40f5-9edb-0dbece5f12ae\") " pod="openstack/placement4c07-account-delete-52zll" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.219430 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b7b0248-e1e6-40f5-9edb-0dbece5f12ae-operator-scripts\") pod \"placement4c07-account-delete-52zll\" (UID: \"1b7b0248-e1e6-40f5-9edb-0dbece5f12ae\") " pod="openstack/placement4c07-account-delete-52zll" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.221616 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinderd6b0-account-delete-plvlb"] Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.276857 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron9335-account-delete-9jxvf"] Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.278268 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron9335-account-delete-9jxvf" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.282796 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjstg\" (UniqueName: \"kubernetes.io/projected/1b7b0248-e1e6-40f5-9edb-0dbece5f12ae-kube-api-access-cjstg\") pod \"placement4c07-account-delete-52zll\" (UID: \"1b7b0248-e1e6-40f5-9edb-0dbece5f12ae\") " pod="openstack/placement4c07-account-delete-52zll" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.315307 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron9335-account-delete-9jxvf"] Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.320798 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d887f697-ad36-44e6-8011-878d0a78b3bf-operator-scripts\") pod \"cinderd6b0-account-delete-plvlb\" (UID: \"d887f697-ad36-44e6-8011-878d0a78b3bf\") " pod="openstack/cinderd6b0-account-delete-plvlb" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.321284 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95fn9\" (UniqueName: \"kubernetes.io/projected/d887f697-ad36-44e6-8011-878d0a78b3bf-kube-api-access-95fn9\") pod \"cinderd6b0-account-delete-plvlb\" (UID: \"d887f697-ad36-44e6-8011-878d0a78b3bf\") " pod="openstack/cinderd6b0-account-delete-plvlb" Nov 28 16:36:42 crc kubenswrapper[4954]: E1128 16:36:42.321546 4954 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:42 crc kubenswrapper[4954]: E1128 16:36:42.321691 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data podName:ca81ef12-eb13-468e-81fc-0fdf6aba8830 nodeName:}" failed. No retries permitted until 2025-11-28 16:36:43.321676431 +0000 UTC m=+1556.713344972 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data") pod "rabbitmq-cell1-server-0" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830") : configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.328057 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.336822 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" containerName="openstack-network-exporter" containerID="cri-o://08f7109509c86b6eda033eb417ad90a9b3211cb608828eb97354929fe65260e4" gracePeriod=300 Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.387052 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glanceb261-account-delete-s574s" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.387823 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican3a2d-account-delete-cznv8"] Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.389153 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican3a2d-account-delete-cznv8" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.428566 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d887f697-ad36-44e6-8011-878d0a78b3bf-operator-scripts\") pod \"cinderd6b0-account-delete-plvlb\" (UID: \"d887f697-ad36-44e6-8011-878d0a78b3bf\") " pod="openstack/cinderd6b0-account-delete-plvlb" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.428616 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/762dc5ac-a8d3-4b91-806c-079e568530b1-operator-scripts\") pod \"neutron9335-account-delete-9jxvf\" (UID: \"762dc5ac-a8d3-4b91-806c-079e568530b1\") " pod="openstack/neutron9335-account-delete-9jxvf" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.428693 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwtx7\" (UniqueName: \"kubernetes.io/projected/762dc5ac-a8d3-4b91-806c-079e568530b1-kube-api-access-kwtx7\") pod \"neutron9335-account-delete-9jxvf\" (UID: \"762dc5ac-a8d3-4b91-806c-079e568530b1\") " pod="openstack/neutron9335-account-delete-9jxvf" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.428756 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95fn9\" (UniqueName: \"kubernetes.io/projected/d887f697-ad36-44e6-8011-878d0a78b3bf-kube-api-access-95fn9\") pod \"cinderd6b0-account-delete-plvlb\" (UID: \"d887f697-ad36-44e6-8011-878d0a78b3bf\") " pod="openstack/cinderd6b0-account-delete-plvlb" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.429695 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d887f697-ad36-44e6-8011-878d0a78b3bf-operator-scripts\") pod \"cinderd6b0-account-delete-plvlb\" (UID: \"d887f697-ad36-44e6-8011-878d0a78b3bf\") " pod="openstack/cinderd6b0-account-delete-plvlb" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.440948 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-6kcgd"] Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.460141 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-6kcgd"] Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.497298 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95fn9\" (UniqueName: \"kubernetes.io/projected/d887f697-ad36-44e6-8011-878d0a78b3bf-kube-api-access-95fn9\") pod \"cinderd6b0-account-delete-plvlb\" (UID: \"d887f697-ad36-44e6-8011-878d0a78b3bf\") " pod="openstack/cinderd6b0-account-delete-plvlb" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.528754 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican3a2d-account-delete-cznv8"] Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.536607 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvkpb\" (UniqueName: \"kubernetes.io/projected/6abe9424-aabe-4ae6-8032-79b92583d31d-kube-api-access-gvkpb\") pod \"barbican3a2d-account-delete-cznv8\" (UID: \"6abe9424-aabe-4ae6-8032-79b92583d31d\") " pod="openstack/barbican3a2d-account-delete-cznv8" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.536712 4954 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/762dc5ac-a8d3-4b91-806c-079e568530b1-operator-scripts\") pod \"neutron9335-account-delete-9jxvf\" (UID: \"762dc5ac-a8d3-4b91-806c-079e568530b1\") " pod="openstack/neutron9335-account-delete-9jxvf" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.536768 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6abe9424-aabe-4ae6-8032-79b92583d31d-operator-scripts\") pod \"barbican3a2d-account-delete-cznv8\" (UID: \"6abe9424-aabe-4ae6-8032-79b92583d31d\") " pod="openstack/barbican3a2d-account-delete-cznv8" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.536803 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwtx7\" (UniqueName: \"kubernetes.io/projected/762dc5ac-a8d3-4b91-806c-079e568530b1-kube-api-access-kwtx7\") pod \"neutron9335-account-delete-9jxvf\" (UID: \"762dc5ac-a8d3-4b91-806c-079e568530b1\") " pod="openstack/neutron9335-account-delete-9jxvf" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.537801 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/762dc5ac-a8d3-4b91-806c-079e568530b1-operator-scripts\") pod \"neutron9335-account-delete-9jxvf\" (UID: \"762dc5ac-a8d3-4b91-806c-079e568530b1\") " pod="openstack/neutron9335-account-delete-9jxvf" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.552980 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement4c07-account-delete-52zll" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.619614 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-p99dx"] Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.629620 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwtx7\" (UniqueName: \"kubernetes.io/projected/762dc5ac-a8d3-4b91-806c-079e568530b1-kube-api-access-kwtx7\") pod \"neutron9335-account-delete-9jxvf\" (UID: \"762dc5ac-a8d3-4b91-806c-079e568530b1\") " pod="openstack/neutron9335-account-delete-9jxvf" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.630092 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinderd6b0-account-delete-plvlb" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.642862 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvkpb\" (UniqueName: \"kubernetes.io/projected/6abe9424-aabe-4ae6-8032-79b92583d31d-kube-api-access-gvkpb\") pod \"barbican3a2d-account-delete-cznv8\" (UID: \"6abe9424-aabe-4ae6-8032-79b92583d31d\") " pod="openstack/barbican3a2d-account-delete-cznv8" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.643001 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6abe9424-aabe-4ae6-8032-79b92583d31d-operator-scripts\") pod \"barbican3a2d-account-delete-cznv8\" (UID: \"6abe9424-aabe-4ae6-8032-79b92583d31d\") " pod="openstack/barbican3a2d-account-delete-cznv8" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.643770 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6abe9424-aabe-4ae6-8032-79b92583d31d-operator-scripts\") pod \"barbican3a2d-account-delete-cznv8\" (UID: \"6abe9424-aabe-4ae6-8032-79b92583d31d\") " pod="openstack/barbican3a2d-account-delete-cznv8" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.690345 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron9335-account-delete-9jxvf" Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.701660 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-p99dx"] Nov 28 16:36:42 crc kubenswrapper[4954]: I1128 16:36:42.716407 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvkpb\" (UniqueName: \"kubernetes.io/projected/6abe9424-aabe-4ae6-8032-79b92583d31d-kube-api-access-gvkpb\") pod \"barbican3a2d-account-delete-cznv8\" (UID: \"6abe9424-aabe-4ae6-8032-79b92583d31d\") " pod="openstack/barbican3a2d-account-delete-cznv8" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:42.969289 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-v7lkp"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.023965 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-v7lkp"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.036581 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-dblvm"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.075159 4954 generic.go:334] "Generic (PLEG): container finished" podID="1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" containerID="08f7109509c86b6eda033eb417ad90a9b3211cb608828eb97354929fe65260e4" exitCode=2 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.075563 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6","Type":"ContainerDied","Data":"08f7109509c86b6eda033eb417ad90a9b3211cb608828eb97354929fe65260e4"} Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.077424 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-dblvm"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.108271 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_892117aa-d306-4a1d-bf6e-b203b6337537/ovsdbserver-nb/0.log" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.108328 4954 generic.go:334] "Generic (PLEG): container finished" 
podID="892117aa-d306-4a1d-bf6e-b203b6337537" containerID="0bc97c121bcab5b752053d9d791ed5b026bc09edb0e37b0c2ab456c187dcfed8" exitCode=2 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.108352 4954 generic.go:334] "Generic (PLEG): container finished" podID="892117aa-d306-4a1d-bf6e-b203b6337537" containerID="107051ce6919142522558805719fa4abac53f52c0b68b1e403a6ef8355295c53" exitCode=143 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.108432 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"892117aa-d306-4a1d-bf6e-b203b6337537","Type":"ContainerDied","Data":"0bc97c121bcab5b752053d9d791ed5b026bc09edb0e37b0c2ab456c187dcfed8"} Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.108465 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"892117aa-d306-4a1d-bf6e-b203b6337537","Type":"ContainerDied","Data":"107051ce6919142522558805719fa4abac53f52c0b68b1e403a6ef8355295c53"} Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.147611 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.164819 4954 generic.go:334] "Generic (PLEG): container finished" podID="8bff6e67-d9f4-4952-992d-1fa362d23a5c" containerID="022212ff501e5715e891c5ad172a0810fa883b76595a765f3609b5a7f03c28b6" exitCode=2 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.164913 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-sqc29"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.164943 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8bff6e67-d9f4-4952-992d-1fa362d23a5c","Type":"ContainerDied","Data":"022212ff501e5715e891c5ad172a0810fa883b76595a765f3609b5a7f03c28b6"} Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.168774 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" containerName="ovsdbserver-sb" containerID="cri-o://a8434b10b60eb2a163aaaf4ff3727ed2fe9014b9cf717f4ce97fcc65af984b89" gracePeriod=300 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.177047 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-sqc29"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.195623 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-vmnzt"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.232657 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-55p57"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.235938 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican3a2d-account-delete-cznv8" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.236076 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-55p57" podUID="041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa" containerName="openstack-network-exporter" containerID="cri-o://e156c21bec3f6d7422ff833d8e664020da9203178d2bf9a0025944bc8713896f" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.263760 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-d4vfs"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.282609 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novaapif92b-account-delete-sj7n5"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.284163 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapif92b-account-delete-sj7n5" Nov 28 16:36:43 crc kubenswrapper[4954]: E1128 16:36:43.297851 4954 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 16:36:43 crc kubenswrapper[4954]: E1128 16:36:43.304936 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data podName:8a252e1a-c96a-4f98-b24e-b224fedf344c nodeName:}" failed. No retries permitted until 2025-11-28 16:36:43.804902603 +0000 UTC m=+1557.196571144 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data") pod "rabbitmq-server-0" (UID: "8a252e1a-c96a-4f98-b24e-b224fedf344c") : configmap "rabbitmq-config-data" not found Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.300833 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-89b6465dd-zsqfd"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.305338 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-89b6465dd-zsqfd" podUID="78bd4de4-5601-4771-b15c-c240e097519b" containerName="placement-log" containerID="cri-o://25d180f3bd81c9004a4db6ad4e61d48011cac275e60bfe32015be950a41aedcd" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.305885 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-89b6465dd-zsqfd" podUID="78bd4de4-5601-4771-b15c-c240e097519b" containerName="placement-api" containerID="cri-o://b9815c657f848312db17e79942f5eddf0b33ee714edd4d18a3accefb78385c5d" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.379049 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-dr7bj"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.379300 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" podUID="52dbe85a-ccd3-4527-af8d-17ad9748d3c4" containerName="dnsmasq-dns" containerID="cri-o://346a713c806456b0618fdd74822beca4c32ebb91c0170c66b810d1f4091e8caf" gracePeriod=10 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.393721 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1bc7ec75-0145-47f2-8193-28c8f17b572a-operator-scripts\") pod \"novaapif92b-account-delete-sj7n5\" (UID: \"1bc7ec75-0145-47f2-8193-28c8f17b572a\") " 
pod="openstack/novaapif92b-account-delete-sj7n5" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.393929 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqhwh\" (UniqueName: \"kubernetes.io/projected/1bc7ec75-0145-47f2-8193-28c8f17b572a-kube-api-access-xqhwh\") pod \"novaapif92b-account-delete-sj7n5\" (UID: \"1bc7ec75-0145-47f2-8193-28c8f17b572a\") " pod="openstack/novaapif92b-account-delete-sj7n5" Nov 28 16:36:43 crc kubenswrapper[4954]: E1128 16:36:43.394078 4954 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:43 crc kubenswrapper[4954]: E1128 16:36:43.394203 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data podName:ca81ef12-eb13-468e-81fc-0fdf6aba8830 nodeName:}" failed. No retries permitted until 2025-11-28 16:36:45.39413049 +0000 UTC m=+1558.785799051 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data") pod "rabbitmq-cell1-server-0" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830") : configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.423274 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-24bxb"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.448949 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-24bxb"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.481746 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell0528b-account-delete-4cx2z"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.511852 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell0528b-account-delete-4cx2z" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.521797 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqhwh\" (UniqueName: \"kubernetes.io/projected/1bc7ec75-0145-47f2-8193-28c8f17b572a-kube-api-access-xqhwh\") pod \"novaapif92b-account-delete-sj7n5\" (UID: \"1bc7ec75-0145-47f2-8193-28c8f17b572a\") " pod="openstack/novaapif92b-account-delete-sj7n5" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.522199 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1bc7ec75-0145-47f2-8193-28c8f17b572a-operator-scripts\") pod \"novaapif92b-account-delete-sj7n5\" (UID: \"1bc7ec75-0145-47f2-8193-28c8f17b572a\") " pod="openstack/novaapif92b-account-delete-sj7n5" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.522690 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapif92b-account-delete-sj7n5"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.530158 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1bc7ec75-0145-47f2-8193-28c8f17b572a-operator-scripts\") pod \"novaapif92b-account-delete-sj7n5\" (UID: \"1bc7ec75-0145-47f2-8193-28c8f17b572a\") " pod="openstack/novaapif92b-account-delete-sj7n5" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.564893 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqhwh\" (UniqueName: \"kubernetes.io/projected/1bc7ec75-0145-47f2-8193-28c8f17b572a-kube-api-access-xqhwh\") pod \"novaapif92b-account-delete-sj7n5\" (UID: \"1bc7ec75-0145-47f2-8193-28c8f17b572a\") " pod="openstack/novaapif92b-account-delete-sj7n5" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.578847 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0528b-account-delete-4cx2z"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.602173 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-5d8sw"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.617753 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-crjqq"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.628239 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-5d8sw"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.631329 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af0c7199-9850-4553-b644-371c7e305443-operator-scripts\") pod \"novacell0528b-account-delete-4cx2z\" (UID: \"af0c7199-9850-4553-b644-371c7e305443\") " pod="openstack/novacell0528b-account-delete-4cx2z" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.631381 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rlxx\" (UniqueName: \"kubernetes.io/projected/af0c7199-9850-4553-b644-371c7e305443-kube-api-access-7rlxx\") pod \"novacell0528b-account-delete-4cx2z\" (UID: \"af0c7199-9850-4553-b644-371c7e305443\") " pod="openstack/novacell0528b-account-delete-4cx2z" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.639758 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-crjqq"] Nov 28 
16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.650713 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.650991 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" containerName="glance-log" containerID="cri-o://64d4cc73b002f732d7801401e3ff287905c1abfeedbf8f510d4caf3a46f41b10" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.651615 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" containerName="glance-httpd" containerID="cri-o://0a58c23a2643ee01442026f1cd5c2931589d7ded1516de8835e9e562824e613e" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.693545 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.694188 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-server" containerID="cri-o://41ade0bb5410fb26da6929cbab355527e23f47061fb8a9b3b265c2dba9186585" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.694345 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-updater" containerID="cri-o://201e26515a6013ab930eca8cf3032c3b5c434f99841737467e7187c6b2390519" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.694321 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-server" containerID="cri-o://d6384cb554ea3695e28b1473c8d2c8cabce3a920e913bc674060bf33f2b8c9a0" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.694416 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-auditor" containerID="cri-o://d21ef8c65b44b65b7b352925f975d0e931485db1bbb827c59654bc000dd7fc7f" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.694464 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-replicator" containerID="cri-o://aa502d58af9315bcf5906e11b75470ef748ed5deffc2e4f87f6a86e7decf16e6" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.694502 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-server" containerID="cri-o://e269517f2a950366081e2a4332d4c243079436ed72f1864b1a8bba98e71728ad" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.694844 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="swift-recon-cron" containerID="cri-o://f3f394c2aab82db3f2e4a7ff59593262ff98b913c3e8a66fa7b1fc990d8e2ded" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.694922 4954 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="rsync" containerID="cri-o://ec66bc366041d8589f9f9c599f24a864c0e0bb563e8e92223216a3166545a6a1" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.694975 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-expirer" containerID="cri-o://a2bca9bc4ada3b3fc6bc0382c9d6662693c39fd5f31277e3ff70afcea99f052c" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.695025 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-updater" containerID="cri-o://b3f9dee958ec0b21d8a91a7dab2c06039dfd85f545f18ca89dd9084c1a7e3b93" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.694563 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-reaper" containerID="cri-o://a2014dcb5213ccabf2e5a08d052c48b1fa541345bc969ed78a02b8acbd0b836e" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.695080 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-replicator" containerID="cri-o://3a8c57cddf7184457522309c1f75ea85035dd861fa97da4dc3572e601c38586a" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.695066 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-auditor" containerID="cri-o://dcc789be8b7c6143065659690f57fb8fb47dcf73a3d5b9f4d2416eef216a83d6" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.695152 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-auditor" containerID="cri-o://5bb02d057581590a07be4f6c68ca4784ff007688bf3a7b3a1009e0c9d6023107" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.695191 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-replicator" containerID="cri-o://a3de1c50e569442c4d137eb6c4f01fa275602e3c8780f037bca31766cae6424c" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.733432 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.739802 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af0c7199-9850-4553-b644-371c7e305443-operator-scripts\") pod \"novacell0528b-account-delete-4cx2z\" (UID: \"af0c7199-9850-4553-b644-371c7e305443\") " pod="openstack/novacell0528b-account-delete-4cx2z" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.739843 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rlxx\" (UniqueName: \"kubernetes.io/projected/af0c7199-9850-4553-b644-371c7e305443-kube-api-access-7rlxx\") 
pod \"novacell0528b-account-delete-4cx2z\" (UID: \"af0c7199-9850-4553-b644-371c7e305443\") " pod="openstack/novacell0528b-account-delete-4cx2z" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.739888 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="c4439c3d-c90f-4b13-87a4-01c211cec875" containerName="cinder-scheduler" containerID="cri-o://e0e955ad82d7153e1ce49532c6e27790509f8ccd441ac5a41fe5281a1f7f3c3a" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.740472 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="c4439c3d-c90f-4b13-87a4-01c211cec875" containerName="probe" containerID="cri-o://01194eaa4540cc51e694ffb69c1bc9bb2fcad75b283509d2f2d288ca98fa6388" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.741097 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af0c7199-9850-4553-b644-371c7e305443-operator-scripts\") pod \"novacell0528b-account-delete-4cx2z\" (UID: \"af0c7199-9850-4553-b644-371c7e305443\") " pod="openstack/novacell0528b-account-delete-4cx2z" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.764326 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapif92b-account-delete-sj7n5" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.779513 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-bdd54468f-gzf48"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.779869 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-bdd54468f-gzf48" podUID="6ce804c9-edba-4404-9099-4c0f102aa1b2" containerName="neutron-api" containerID="cri-o://50b931559f7a77fb0607febad2cc3106710093b63140da41ba7de9a70984ad4b" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.780682 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-bdd54468f-gzf48" podUID="6ce804c9-edba-4404-9099-4c0f102aa1b2" containerName="neutron-httpd" containerID="cri-o://42c4ec9ba6dc6f636e97ef6169d3c32ab858524488e20c3e4beb2047ba334405" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.803477 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rlxx\" (UniqueName: \"kubernetes.io/projected/af0c7199-9850-4553-b644-371c7e305443-kube-api-access-7rlxx\") pod \"novacell0528b-account-delete-4cx2z\" (UID: \"af0c7199-9850-4553-b644-371c7e305443\") " pod="openstack/novacell0528b-account-delete-4cx2z" Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.831545 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.831912 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="7d8b87c9-4dd0-431c-a555-49141762763a" containerName="cinder-api-log" containerID="cri-o://8add70c010322f7f578f7c468da2668563f210dfc294ebf9f41c56210d6165fb" gracePeriod=30 Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.832698 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="7d8b87c9-4dd0-431c-a555-49141762763a" containerName="cinder-api" containerID="cri-o://786c11653ae6fc057189678b362be2013431f0eae8133efcb4e6222525e8f2c6" gracePeriod=30 Nov 28 16:36:43 
crc kubenswrapper[4954]: E1128 16:36:43.848931 4954 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 16:36:43 crc kubenswrapper[4954]: E1128 16:36:43.849026 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data podName:8a252e1a-c96a-4f98-b24e-b224fedf344c nodeName:}" failed. No retries permitted until 2025-11-28 16:36:44.849000639 +0000 UTC m=+1558.240669180 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data") pod "rabbitmq-server-0" (UID: "8a252e1a-c96a-4f98-b24e-b224fedf344c") : configmap "rabbitmq-config-data" not found Nov 28 16:36:43 crc kubenswrapper[4954]: I1128 16:36:43.974809 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d145bb5-4e17-4345-b697-a0273aef5f49" path="/var/lib/kubelet/pods/0d145bb5-4e17-4345-b697-a0273aef5f49/volumes" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.027942 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e50d182-9126-4ef1-a377-8620e0357d4d" path="/var/lib/kubelet/pods/1e50d182-9126-4ef1-a377-8620e0357d4d/volumes" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.029405 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e" path="/var/lib/kubelet/pods/2e5d6d8d-6dcb-4147-9be4-cee3a30e1d9e/volumes" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.030075 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5cf8cff4-ff61-4663-a23b-d68532916ee3" path="/var/lib/kubelet/pods/5cf8cff4-ff61-4663-a23b-d68532916ee3/volumes" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.034083 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac64805c-4e48-4c5c-99dd-f724049c6eae" path="/var/lib/kubelet/pods/ac64805c-4e48-4c5c-99dd-f724049c6eae/volumes" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.035498 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c10c6844-a43f-4167-90af-673636f4006b" path="/var/lib/kubelet/pods/c10c6844-a43f-4167-90af-673636f4006b/volumes" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.036717 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e78f971d-0af3-4789-a73a-b57790eb3dfa" path="/var/lib/kubelet/pods/e78f971d-0af3-4789-a73a-b57790eb3dfa/volumes" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.037514 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4" path="/var/lib/kubelet/pods/f8ae2a66-ae7e-46a5-8485-ed7a0c3293b4/volumes" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.039324 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.039364 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.039379 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.039391 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.039401 4954 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/barbican-keystone-listener-64f4444cdb-4hdcg"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.039412 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-68f67467f4-8bd8x"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.039423 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.039435 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-5b74b79fdc-99274"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.039445 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-2675-account-create-update-gp79n"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.039733 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-5b74b79fdc-99274" podUID="9023ad7d-6621-4ed8-aec4-bd1d0db53088" containerName="barbican-worker-log" containerID="cri-o://d51ea12ac29bf1c9df355eb528d2411b3c64385297f631ec4383475fa281dbec" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.039981 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="1c362e30-8109-411f-9f89-21c7c28da6c2" containerName="glance-log" containerID="cri-o://755de0385bf4e11b5c8e1c31c08371669daa12dda25d66b8c68b992fd6205979" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.040237 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="642e0138-17d8-47e0-a67d-51a758291f7e" containerName="nova-metadata-log" containerID="cri-o://a4137e9f8aff40189511ca5d877f9f9ea4572d1ba1b973060ea1671fe11c5291" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.040580 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" podUID="70613221-3087-4dc3-9f41-86eb6fe88041" containerName="barbican-keystone-listener-log" containerID="cri-o://bd0ca4ad41624f1010f9badf4ad0d9925862fa25503f6b1962dabd2c1181aa11" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.040735 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-68f67467f4-8bd8x" podUID="45c8821a-baab-4e3c-8ffb-f4fe71722666" containerName="barbican-api-log" containerID="cri-o://98b35779e547ce4c6fa5ddd4466450c48cf21079fb3271d4d7874c2a5254e844" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.040897 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" containerName="nova-api-log" containerID="cri-o://5a2d7806f5c1628a5fc711b36527a7d4ae8db75c650b99cdcc2d5020d8a8b6cd" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.041216 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-5b74b79fdc-99274" podUID="9023ad7d-6621-4ed8-aec4-bd1d0db53088" containerName="barbican-worker" containerID="cri-o://ad74a48962c94f92742b21cb97317608d199491c209ca0f9aba391fcde7835b3" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.041461 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="1c362e30-8109-411f-9f89-21c7c28da6c2" containerName="glance-httpd" 
containerID="cri-o://bf5f47ec2adaca701e2261bfe9e2ba603aeebd57c0748bfc08cf9e1207069dde" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.041766 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="642e0138-17d8-47e0-a67d-51a758291f7e" containerName="nova-metadata-metadata" containerID="cri-o://a90a636e9b99d45f8039d9676ed4cf0cbaf5b8af82689455191e468cfb83faa4" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.041861 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" podUID="70613221-3087-4dc3-9f41-86eb6fe88041" containerName="barbican-keystone-listener" containerID="cri-o://19d7c28fd7bc7d427f3a1148dfb20dae0ede2b943c17ee7da5ab666f31980fb2" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.041952 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-68f67467f4-8bd8x" podUID="45c8821a-baab-4e3c-8ffb-f4fe71722666" containerName="barbican-api" containerID="cri-o://2ef96eee105583bb4f5aff04b8ac6bdfa71044142eddd069aee2fd7df3cab725" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.042028 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" containerName="nova-api-api" containerID="cri-o://c22a271e9381f4b47783c1a7935315aa68fd7d56ae23e9e0515f0742f1172519" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.053834 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-2675-account-create-update-gp79n"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.080311 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell0528b-account-delete-4cx2z" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.089337 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.089660 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="cf74e9b3-c272-47a3-bd81-1fae19e39236" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://029089c638fd3d3d0c67c5be6319770aaa300384c28cd15befea0e4ab7c84c99" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.146759 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-dqk5s"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.169137 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-dqk5s"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.230561 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.234130 4954 generic.go:334] "Generic (PLEG): container finished" podID="91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" containerID="64d4cc73b002f732d7801401e3ff287905c1abfeedbf8f510d4caf3a46f41b10" exitCode=143 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.234229 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf","Type":"ContainerDied","Data":"64d4cc73b002f732d7801401e3ff287905c1abfeedbf8f510d4caf3a46f41b10"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.244005 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-pbz9g"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.255839 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-pbz9g"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.258403 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="ca81ef12-eb13-468e-81fc-0fdf6aba8830" containerName="rabbitmq" containerID="cri-o://dd5fdf435b0dbd1c11c86c7785b0da91c9296f8511aad91bbac2bb898f435f32" gracePeriod=604800 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.259321 4954 generic.go:334] "Generic (PLEG): container finished" podID="78bd4de4-5601-4771-b15c-c240e097519b" containerID="25d180f3bd81c9004a4db6ad4e61d48011cac275e60bfe32015be950a41aedcd" exitCode=143 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.259385 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-89b6465dd-zsqfd" event={"ID":"78bd4de4-5601-4771-b15c-c240e097519b","Type":"ContainerDied","Data":"25d180f3bd81c9004a4db6ad4e61d48011cac275e60bfe32015be950a41aedcd"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.282228 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.282496 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="43af6ac7-70c5-43ff-84a0-0f6b6159ae66" containerName="nova-cell1-conductor-conductor" containerID="cri-o://b45576e777726ad60fd11ec47af67513edf0fef268b3da28b29e6246985b5775" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.296633 4954 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/nova-cell0-conductor-db-sync-twcpm"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.314752 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.314983 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="0f8d636b-e07a-46b1-91b3-899a395e3ce5" containerName="nova-cell0-conductor-conductor" containerID="cri-o://a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328830 4954 generic.go:334] "Generic (PLEG): container finished" podID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerID="a2bca9bc4ada3b3fc6bc0382c9d6662693c39fd5f31277e3ff70afcea99f052c" exitCode=0 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328857 4954 generic.go:334] "Generic (PLEG): container finished" podID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerID="b3f9dee958ec0b21d8a91a7dab2c06039dfd85f545f18ca89dd9084c1a7e3b93" exitCode=0 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328866 4954 generic.go:334] "Generic (PLEG): container finished" podID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerID="dcc789be8b7c6143065659690f57fb8fb47dcf73a3d5b9f4d2416eef216a83d6" exitCode=0 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328875 4954 generic.go:334] "Generic (PLEG): container finished" podID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerID="3a8c57cddf7184457522309c1f75ea85035dd861fa97da4dc3572e601c38586a" exitCode=0 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328881 4954 generic.go:334] "Generic (PLEG): container finished" podID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerID="201e26515a6013ab930eca8cf3032c3b5c434f99841737467e7187c6b2390519" exitCode=0 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328887 4954 generic.go:334] "Generic (PLEG): container finished" podID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerID="d21ef8c65b44b65b7b352925f975d0e931485db1bbb827c59654bc000dd7fc7f" exitCode=0 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328894 4954 generic.go:334] "Generic (PLEG): container finished" podID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerID="aa502d58af9315bcf5906e11b75470ef748ed5deffc2e4f87f6a86e7decf16e6" exitCode=0 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328891 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"a2bca9bc4ada3b3fc6bc0382c9d6662693c39fd5f31277e3ff70afcea99f052c"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328934 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"b3f9dee958ec0b21d8a91a7dab2c06039dfd85f545f18ca89dd9084c1a7e3b93"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328946 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"dcc789be8b7c6143065659690f57fb8fb47dcf73a3d5b9f4d2416eef216a83d6"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328955 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"3a8c57cddf7184457522309c1f75ea85035dd861fa97da4dc3572e601c38586a"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328964 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"201e26515a6013ab930eca8cf3032c3b5c434f99841737467e7187c6b2390519"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328973 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"d21ef8c65b44b65b7b352925f975d0e931485db1bbb827c59654bc000dd7fc7f"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328981 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"aa502d58af9315bcf5906e11b75470ef748ed5deffc2e4f87f6a86e7decf16e6"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328991 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"a2014dcb5213ccabf2e5a08d052c48b1fa541345bc969ed78a02b8acbd0b836e"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.328901 4954 generic.go:334] "Generic (PLEG): container finished" podID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerID="a2014dcb5213ccabf2e5a08d052c48b1fa541345bc969ed78a02b8acbd0b836e" exitCode=0 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.329011 4954 generic.go:334] "Generic (PLEG): container finished" podID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerID="5bb02d057581590a07be4f6c68ca4784ff007688bf3a7b3a1009e0c9d6023107" exitCode=0 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.329025 4954 generic.go:334] "Generic (PLEG): container finished" podID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerID="a3de1c50e569442c4d137eb6c4f01fa275602e3c8780f037bca31766cae6424c" exitCode=0 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.329104 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"5bb02d057581590a07be4f6c68ca4784ff007688bf3a7b3a1009e0c9d6023107"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.329115 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"a3de1c50e569442c4d137eb6c4f01fa275602e3c8780f037bca31766cae6424c"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.333675 4954 generic.go:334] "Generic (PLEG): container finished" podID="082f7647-b97a-4d57-83b1-ae69d61827b1" containerID="8f3b70dbbdb6e3dcdff478850bd55b90a960a4d6e61d20eb2e977c9634518dce" exitCode=137 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.346385 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-twcpm"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.354387 4954 generic.go:334] "Generic (PLEG): container finished" podID="7d8b87c9-4dd0-431c-a555-49141762763a" containerID="8add70c010322f7f578f7c468da2668563f210dfc294ebf9f41c56210d6165fb" exitCode=143 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.354486 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"7d8b87c9-4dd0-431c-a555-49141762763a","Type":"ContainerDied","Data":"8add70c010322f7f578f7c468da2668563f210dfc294ebf9f41c56210d6165fb"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.379534 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-55p57_041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa/openstack-network-exporter/0.log" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.379580 4954 generic.go:334] "Generic (PLEG): container finished" podID="041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa" containerID="e156c21bec3f6d7422ff833d8e664020da9203178d2bf9a0025944bc8713896f" exitCode=2 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.379630 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-55p57" event={"ID":"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa","Type":"ContainerDied","Data":"e156c21bec3f6d7422ff833d8e664020da9203178d2bf9a0025944bc8713896f"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.438629 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6/ovsdbserver-sb/0.log" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.438671 4954 generic.go:334] "Generic (PLEG): container finished" podID="1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" containerID="a8434b10b60eb2a163aaaf4ff3727ed2fe9014b9cf717f4ce97fcc65af984b89" exitCode=143 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.438719 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6","Type":"ContainerDied","Data":"a8434b10b60eb2a163aaaf4ff3727ed2fe9014b9cf717f4ce97fcc65af984b89"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.477472 4954 generic.go:334] "Generic (PLEG): container finished" podID="52dbe85a-ccd3-4527-af8d-17ad9748d3c4" containerID="346a713c806456b0618fdd74822beca4c32ebb91c0170c66b810d1f4091e8caf" exitCode=0 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.477518 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" event={"ID":"52dbe85a-ccd3-4527-af8d-17ad9748d3c4","Type":"ContainerDied","Data":"346a713c806456b0618fdd74822beca4c32ebb91c0170c66b810d1f4091e8caf"} Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.477651 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_892117aa-d306-4a1d-bf6e-b203b6337537/ovsdbserver-nb/0.log" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.477738 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.558730 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-d4vfs" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovs-vswitchd" containerID="cri-o://7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" gracePeriod=29 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.562653 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="8a252e1a-c96a-4f98-b24e-b224fedf344c" containerName="rabbitmq" containerID="cri-o://bd527e7188610d7a63072bd328e5dd74a2b0ad964e3b6ec643b1c9a69f9b0c1d" gracePeriod=604800 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.571273 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-combined-ca-bundle\") pod \"892117aa-d306-4a1d-bf6e-b203b6337537\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.571309 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"892117aa-d306-4a1d-bf6e-b203b6337537\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.571332 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-metrics-certs-tls-certs\") pod \"892117aa-d306-4a1d-bf6e-b203b6337537\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.571396 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfhnx\" (UniqueName: \"kubernetes.io/projected/892117aa-d306-4a1d-bf6e-b203b6337537-kube-api-access-xfhnx\") pod \"892117aa-d306-4a1d-bf6e-b203b6337537\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.571441 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-ovsdbserver-nb-tls-certs\") pod \"892117aa-d306-4a1d-bf6e-b203b6337537\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.571475 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/892117aa-d306-4a1d-bf6e-b203b6337537-ovsdb-rundir\") pod \"892117aa-d306-4a1d-bf6e-b203b6337537\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.571504 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/892117aa-d306-4a1d-bf6e-b203b6337537-config\") pod \"892117aa-d306-4a1d-bf6e-b203b6337537\" (UID: \"892117aa-d306-4a1d-bf6e-b203b6337537\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.571568 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/892117aa-d306-4a1d-bf6e-b203b6337537-scripts\") pod \"892117aa-d306-4a1d-bf6e-b203b6337537\" (UID: 
\"892117aa-d306-4a1d-bf6e-b203b6337537\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.577081 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/892117aa-d306-4a1d-bf6e-b203b6337537-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "892117aa-d306-4a1d-bf6e-b203b6337537" (UID: "892117aa-d306-4a1d-bf6e-b203b6337537"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.579089 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/892117aa-d306-4a1d-bf6e-b203b6337537-config" (OuterVolumeSpecName: "config") pod "892117aa-d306-4a1d-bf6e-b203b6337537" (UID: "892117aa-d306-4a1d-bf6e-b203b6337537"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.579877 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/892117aa-d306-4a1d-bf6e-b203b6337537-scripts" (OuterVolumeSpecName: "scripts") pod "892117aa-d306-4a1d-bf6e-b203b6337537" (UID: "892117aa-d306-4a1d-bf6e-b203b6337537"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.586037 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/892117aa-d306-4a1d-bf6e-b203b6337537-kube-api-access-xfhnx" (OuterVolumeSpecName: "kube-api-access-xfhnx") pod "892117aa-d306-4a1d-bf6e-b203b6337537" (UID: "892117aa-d306-4a1d-bf6e-b203b6337537"). InnerVolumeSpecName "kube-api-access-xfhnx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.599703 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "892117aa-d306-4a1d-bf6e-b203b6337537" (UID: "892117aa-d306-4a1d-bf6e-b203b6337537"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.666644 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.675283 4954 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.675310 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xfhnx\" (UniqueName: \"kubernetes.io/projected/892117aa-d306-4a1d-bf6e-b203b6337537-kube-api-access-xfhnx\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.675322 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/892117aa-d306-4a1d-bf6e-b203b6337537-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.675330 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/892117aa-d306-4a1d-bf6e-b203b6337537-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.675340 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/892117aa-d306-4a1d-bf6e-b203b6337537-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.700909 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6/ovsdbserver-sb/0.log" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.701010 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.721030 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "892117aa-d306-4a1d-bf6e-b203b6337537" (UID: "892117aa-d306-4a1d-bf6e-b203b6337537"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.735189 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.739784 4954 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.757626 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="2fb4ddd8-d914-431c-a39f-28a0c6b45354" containerName="galera" containerID="cri-o://352627ac7579bc056309216860ed790605608d2615b1a1c40f0c28e291d87c99" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.781872 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "892117aa-d306-4a1d-bf6e-b203b6337537" (UID: "892117aa-d306-4a1d-bf6e-b203b6337537"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.784339 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-d4vfs" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovsdb-server" containerID="cri-o://1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" gracePeriod=29 Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.801596 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.801625 4954 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.801645 4954 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.889885 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-55p57_041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa/openstack-network-exporter/0.log" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.890011 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.902934 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnkwt\" (UniqueName: \"kubernetes.io/projected/082f7647-b97a-4d57-83b1-ae69d61827b1-kube-api-access-pnkwt\") pod \"082f7647-b97a-4d57-83b1-ae69d61827b1\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903000 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhx6x\" (UniqueName: \"kubernetes.io/projected/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-kube-api-access-dhx6x\") pod \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903047 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-metrics-certs-tls-certs\") pod \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903070 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-dns-swift-storage-0\") pod \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903107 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-combined-ca-bundle\") pod \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903134 4954 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-metrics-certs-tls-certs\") pod \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903165 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-ovn-rundir\") pod \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903227 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903246 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-ovs-rundir\") pod \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903276 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-ovsdbserver-sb\") pod \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903308 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jx85p\" (UniqueName: \"kubernetes.io/projected/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-kube-api-access-jx85p\") pod \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903333 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/082f7647-b97a-4d57-83b1-ae69d61827b1-combined-ca-bundle\") pod \"082f7647-b97a-4d57-83b1-ae69d61827b1\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903381 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-config\") pod \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903437 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-dns-svc\") pod \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903461 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-config\") pod \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903485 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/082f7647-b97a-4d57-83b1-ae69d61827b1-openstack-config\") pod \"082f7647-b97a-4d57-83b1-ae69d61827b1\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903508 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgb9w\" (UniqueName: \"kubernetes.io/projected/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-kube-api-access-jgb9w\") pod \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903511 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa" (UID: "041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903549 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-ovsdbserver-sb-tls-certs\") pod \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903662 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-scripts\") pod \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903750 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-ovsdb-rundir\") pod \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903910 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/082f7647-b97a-4d57-83b1-ae69d61827b1-openstack-config-secret\") pod \"082f7647-b97a-4d57-83b1-ae69d61827b1\" (UID: \"082f7647-b97a-4d57-83b1-ae69d61827b1\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.903981 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-ovsdbserver-nb\") pod \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\" (UID: \"52dbe85a-ccd3-4527-af8d-17ad9748d3c4\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.904010 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-config\") pod \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\" (UID: \"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6\") " Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.904052 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-combined-ca-bundle\") pod \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\" (UID: \"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa\") " Nov 28 16:36:44 crc kubenswrapper[4954]: 
I1128 16:36:44.907212 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" (UID: "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.908000 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-config" (OuterVolumeSpecName: "config") pod "041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa" (UID: "041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.924619 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa" (UID: "041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.926047 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.926314 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="1f8f01be-65c4-4a56-85e5-1b46ba5804ee" containerName="nova-scheduler-scheduler" containerID="cri-o://509442becd9f8224ea575b681dffcd00ba195d6a23b535824e28058798c86086" gracePeriod=30 Nov 28 16:36:44 crc kubenswrapper[4954]: E1128 16:36:44.936895 4954 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.936934 4954 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:44 crc kubenswrapper[4954]: E1128 16:36:44.936977 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data podName:8a252e1a-c96a-4f98-b24e-b224fedf344c nodeName:}" failed. No retries permitted until 2025-11-28 16:36:46.936963614 +0000 UTC m=+1560.328632145 (durationBeforeRetry 2s). 
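
The repeated MountVolume.SetUp failures happen because the rabbitmq ConfigMaps were deleted before their pods; the kubelet keeps each pod pending and retries with a doubling delay, visible in durationBeforeRetry: 1s at 16:36:43, 2s here for rabbitmq-server-0, and 4s below for the rabbitmq-cell1 equivalent. A minimal sketch of that exponential-backoff pattern, with illustrative names rather than the real nestedpendingoperations code:

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryWithBackoff keeps retrying op, doubling the wait after each
// failure -- the durationBeforeRetry progression (1s, 2s, 4s, ...)
// seen in the MountVolume.SetUp errors above. maxDelay caps the growth.
func retryWithBackoff(op func() error, initial, maxDelay time.Duration) {
	delay := initial
	for op() != nil {
		fmt.Printf("failed; no retries permitted for %v\n", delay)
		time.Sleep(delay)
		if delay *= 2; delay > maxDelay {
			delay = maxDelay
		}
	}
}

func main() {
	attempt := 0
	retryWithBackoff(func() error {
		if attempt++; attempt < 4 {
			return errors.New(`configmap "rabbitmq-config-data" not found`)
		}
		return nil
	}, 1*time.Second, 16*time.Second)
	fmt.Println("mount succeeded")
}

The cap matters in practice: without one, a pod stuck behind a permanently missing ConfigMap would back off indefinitely instead of settling into a steady polling interval.
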
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data") pod "rabbitmq-server-0" (UID: "8a252e1a-c96a-4f98-b24e-b224fedf344c") : configmap "rabbitmq-config-data" not found Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.937713 4954 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-ovs-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.937735 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.937748 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.938692 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-config" (OuterVolumeSpecName: "config") pod "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" (UID: "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.949849 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-scripts" (OuterVolumeSpecName: "scripts") pod "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" (UID: "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.961112 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-kube-api-access-jx85p" (OuterVolumeSpecName: "kube-api-access-jx85p") pod "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" (UID: "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6"). InnerVolumeSpecName "kube-api-access-jx85p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.972074 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" (UID: "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.975409 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-kube-api-access-dhx6x" (OuterVolumeSpecName: "kube-api-access-dhx6x") pod "52dbe85a-ccd3-4527-af8d-17ad9748d3c4" (UID: "52dbe85a-ccd3-4527-af8d-17ad9748d3c4"). InnerVolumeSpecName "kube-api-access-dhx6x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.975520 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/082f7647-b97a-4d57-83b1-ae69d61827b1-kube-api-access-pnkwt" (OuterVolumeSpecName: "kube-api-access-pnkwt") pod "082f7647-b97a-4d57-83b1-ae69d61827b1" (UID: "082f7647-b97a-4d57-83b1-ae69d61827b1"). InnerVolumeSpecName "kube-api-access-pnkwt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:44 crc kubenswrapper[4954]: I1128 16:36:44.977051 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-kube-api-access-jgb9w" (OuterVolumeSpecName: "kube-api-access-jgb9w") pod "041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa" (UID: "041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa"). InnerVolumeSpecName "kube-api-access-jgb9w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.039378 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jx85p\" (UniqueName: \"kubernetes.io/projected/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-kube-api-access-jx85p\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.039817 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgb9w\" (UniqueName: \"kubernetes.io/projected/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-kube-api-access-jgb9w\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.039832 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.039846 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.039858 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnkwt\" (UniqueName: \"kubernetes.io/projected/082f7647-b97a-4d57-83b1-ae69d61827b1-kube-api-access-pnkwt\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.039869 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhx6x\" (UniqueName: \"kubernetes.io/projected/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-kube-api-access-dhx6x\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.039907 4954 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.052218 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "892117aa-d306-4a1d-bf6e-b203b6337537" (UID: "892117aa-d306-4a1d-bf6e-b203b6337537"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.069729 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa" (UID: "041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.073943 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/082f7647-b97a-4d57-83b1-ae69d61827b1-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "082f7647-b97a-4d57-83b1-ae69d61827b1" (UID: "082f7647-b97a-4d57-83b1-ae69d61827b1"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.109778 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.114797 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/082f7647-b97a-4d57-83b1-ae69d61827b1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "082f7647-b97a-4d57-83b1-ae69d61827b1" (UID: "082f7647-b97a-4d57-83b1-ae69d61827b1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.121385 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.131178 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.131266 4954 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-d4vfs" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovsdb-server" Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.131615 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.134775 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.172337 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.172412 4954 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-d4vfs" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovs-vswitchd" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.174645 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glanceb261-account-delete-s574s"] Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.194259 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement4c07-account-delete-52zll"] Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.202219 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/892117aa-d306-4a1d-bf6e-b203b6337537-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.202254 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/082f7647-b97a-4d57-83b1-ae69d61827b1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.202267 4954 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/082f7647-b97a-4d57-83b1-ae69d61827b1-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.202278 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.255904 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-config" (OuterVolumeSpecName: "config") pod "52dbe85a-ccd3-4527-af8d-17ad9748d3c4" (UID: "52dbe85a-ccd3-4527-af8d-17ad9748d3c4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:45 crc kubenswrapper[4954]: W1128 16:36:45.271302 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b7b0248_e1e6_40f5_9edb_0dbece5f12ae.slice/crio-92b9091f07db66fb54ad9ed15806e06d63ea795ad367a682413404705add1fa7 WatchSource:0}: Error finding container 92b9091f07db66fb54ad9ed15806e06d63ea795ad367a682413404705add1fa7: Status 404 returned error can't find the container with id 92b9091f07db66fb54ad9ed15806e06d63ea795ad367a682413404705add1fa7 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.282308 4954 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.304068 4954 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.304114 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.343187 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" (UID: "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.357866 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "52dbe85a-ccd3-4527-af8d-17ad9748d3c4" (UID: "52dbe85a-ccd3-4527-af8d-17ad9748d3c4"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.405736 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/082f7647-b97a-4d57-83b1-ae69d61827b1-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "082f7647-b97a-4d57-83b1-ae69d61827b1" (UID: "082f7647-b97a-4d57-83b1-ae69d61827b1"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.407183 4954 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/082f7647-b97a-4d57-83b1-ae69d61827b1-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.407207 4954 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.407221 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.407333 4954 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.407439 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data podName:ca81ef12-eb13-468e-81fc-0fdf6aba8830 nodeName:}" failed. No retries permitted until 2025-11-28 16:36:49.407417207 +0000 UTC m=+1562.799085808 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data") pod "rabbitmq-cell1-server-0" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830") : configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.420420 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" (UID: "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.441357 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.443295 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.447323 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.447399 4954 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="0f8d636b-e07a-46b1-91b3-899a395e3ce5" containerName="nova-cell0-conductor-conductor" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.465652 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "52dbe85a-ccd3-4527-af8d-17ad9748d3c4" (UID: "52dbe85a-ccd3-4527-af8d-17ad9748d3c4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.509446 4954 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.509467 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.520431 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "52dbe85a-ccd3-4527-af8d-17ad9748d3c4" (UID: "52dbe85a-ccd3-4527-af8d-17ad9748d3c4"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.522210 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement4c07-account-delete-52zll" event={"ID":"1b7b0248-e1e6-40f5-9edb-0dbece5f12ae","Type":"ContainerStarted","Data":"92b9091f07db66fb54ad9ed15806e06d63ea795ad367a682413404705add1fa7"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.537996 4954 generic.go:334] "Generic (PLEG): container finished" podID="9023ad7d-6621-4ed8-aec4-bd1d0db53088" containerID="d51ea12ac29bf1c9df355eb528d2411b3c64385297f631ec4383475fa281dbec" exitCode=143 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.538123 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5b74b79fdc-99274" event={"ID":"9023ad7d-6621-4ed8-aec4-bd1d0db53088","Type":"ContainerDied","Data":"d51ea12ac29bf1c9df355eb528d2411b3c64385297f631ec4383475fa281dbec"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.545765 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinderd6b0-account-delete-plvlb"] Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.546452 4954 generic.go:334] "Generic (PLEG): container finished" podID="6ce804c9-edba-4404-9099-4c0f102aa1b2" containerID="42c4ec9ba6dc6f636e97ef6169d3c32ab858524488e20c3e4beb2047ba334405" exitCode=0 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.546565 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bdd54468f-gzf48" event={"ID":"6ce804c9-edba-4404-9099-4c0f102aa1b2","Type":"ContainerDied","Data":"42c4ec9ba6dc6f636e97ef6169d3c32ab858524488e20c3e4beb2047ba334405"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.556652 4954 generic.go:334] "Generic (PLEG): container finished" podID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" exitCode=0 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.556746 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-d4vfs" event={"ID":"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8","Type":"ContainerDied","Data":"1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.556884 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "52dbe85a-ccd3-4527-af8d-17ad9748d3c4" (UID: "52dbe85a-ccd3-4527-af8d-17ad9748d3c4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.593950 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" (UID: "1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.609999 4954 generic.go:334] "Generic (PLEG): container finished" podID="70613221-3087-4dc3-9f41-86eb6fe88041" containerID="bd0ca4ad41624f1010f9badf4ad0d9925862fa25503f6b1962dabd2c1181aa11" exitCode=143 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.610095 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" event={"ID":"70613221-3087-4dc3-9f41-86eb6fe88041","Type":"ContainerDied","Data":"bd0ca4ad41624f1010f9badf4ad0d9925862fa25503f6b1962dabd2c1181aa11"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.620401 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glanceb261-account-delete-s574s" event={"ID":"acf16584-ed76-41dc-955b-17e86a277627","Type":"ContainerStarted","Data":"6742f7621ca6f85c0722c7e24d39b77c3f2dd38c9edd4f647d408d1485887e2c"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.623116 4954 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.623154 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.623166 4954 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/52dbe85a-ccd3-4527-af8d-17ad9748d3c4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.623492 4954 scope.go:117] "RemoveContainer" containerID="8f3b70dbbdb6e3dcdff478850bd55b90a960a4d6e61d20eb2e977c9634518dce" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.625572 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.653856 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-55p57_041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa/openstack-network-exporter/0.log" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.653974 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-55p57" event={"ID":"041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa","Type":"ContainerDied","Data":"e58182aeb38cd0e17ba7dc4d14483d0f519a74821e8ce0ee1c137548d93895ba"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.654027 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-55p57" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.658868 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron9335-account-delete-9jxvf"] Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.663410 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa" (UID: "041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.671458 4954 generic.go:334] "Generic (PLEG): container finished" podID="cf74e9b3-c272-47a3-bd81-1fae19e39236" containerID="029089c638fd3d3d0c67c5be6319770aaa300384c28cd15befea0e4ab7c84c99" exitCode=0 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.671557 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"cf74e9b3-c272-47a3-bd81-1fae19e39236","Type":"ContainerDied","Data":"029089c638fd3d3d0c67c5be6319770aaa300384c28cd15befea0e4ab7c84c99"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.696810 4954 generic.go:334] "Generic (PLEG): container finished" podID="642e0138-17d8-47e0-a67d-51a758291f7e" containerID="a4137e9f8aff40189511ca5d877f9f9ea4572d1ba1b973060ea1671fe11c5291" exitCode=143 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.696920 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"642e0138-17d8-47e0-a67d-51a758291f7e","Type":"ContainerDied","Data":"a4137e9f8aff40189511ca5d877f9f9ea4572d1ba1b973060ea1671fe11c5291"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.707321 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-5fb676564c-cfds2"] Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.715124 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-5fb676564c-cfds2" podUID="3022fb04-fc0f-44b1-9f97-3893aa4bdd68" containerName="proxy-httpd" containerID="cri-o://97b634ca797866be1255f1100d6abda787c78438108b953d4c36c125fda1b7b9" gracePeriod=30 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.715580 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-5fb676564c-cfds2" podUID="3022fb04-fc0f-44b1-9f97-3893aa4bdd68" containerName="proxy-server" containerID="cri-o://6537c4c8cd8dd3fab3e5160d3fe8c38e05653b1a2fb44a373fda2650bb61ce42" gracePeriod=30 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.726584 4954 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.726682 4954 generic.go:334] "Generic (PLEG): container finished" podID="1c362e30-8109-411f-9f89-21c7c28da6c2" containerID="755de0385bf4e11b5c8e1c31c08371669daa12dda25d66b8c68b992fd6205979" exitCode=143 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.726717 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1c362e30-8109-411f-9f89-21c7c28da6c2","Type":"ContainerDied","Data":"755de0385bf4e11b5c8e1c31c08371669daa12dda25d66b8c68b992fd6205979"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.732722 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapif92b-account-delete-sj7n5"] Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.743386 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican3a2d-account-delete-cznv8"] Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.744015 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" 
event={"ID":"52dbe85a-ccd3-4527-af8d-17ad9748d3c4","Type":"ContainerDied","Data":"0659b27ad1225d8e02c463bc35598a923f0eb41e7a5417f13901801c5ec87d01"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.744060 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.746127 4954 generic.go:334] "Generic (PLEG): container finished" podID="43af6ac7-70c5-43ff-84a0-0f6b6159ae66" containerID="b45576e777726ad60fd11ec47af67513edf0fef268b3da28b29e6246985b5775" exitCode=0 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.746195 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"43af6ac7-70c5-43ff-84a0-0f6b6159ae66","Type":"ContainerDied","Data":"b45576e777726ad60fd11ec47af67513edf0fef268b3da28b29e6246985b5775"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.756070 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0528b-account-delete-4cx2z"] Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.756272 4954 generic.go:334] "Generic (PLEG): container finished" podID="7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" containerID="5a2d7806f5c1628a5fc711b36527a7d4ae8db75c650b99cdcc2d5020d8a8b6cd" exitCode=143 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.756363 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5","Type":"ContainerDied","Data":"5a2d7806f5c1628a5fc711b36527a7d4ae8db75c650b99cdcc2d5020d8a8b6cd"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.781915 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_892117aa-d306-4a1d-bf6e-b203b6337537/ovsdbserver-nb/0.log" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.782061 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"892117aa-d306-4a1d-bf6e-b203b6337537","Type":"ContainerDied","Data":"53a7f3f169542d2a66c28107a568f2f29f80883acfd4ac2d4aa6255e68262080"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.782186 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.789590 4954 generic.go:334] "Generic (PLEG): container finished" podID="c4439c3d-c90f-4b13-87a4-01c211cec875" containerID="01194eaa4540cc51e694ffb69c1bc9bb2fcad75b283509d2f2d288ca98fa6388" exitCode=0 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.789640 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c4439c3d-c90f-4b13-87a4-01c211cec875","Type":"ContainerDied","Data":"01194eaa4540cc51e694ffb69c1bc9bb2fcad75b283509d2f2d288ca98fa6388"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.792636 4954 generic.go:334] "Generic (PLEG): container finished" podID="45c8821a-baab-4e3c-8ffb-f4fe71722666" containerID="98b35779e547ce4c6fa5ddd4466450c48cf21079fb3271d4d7874c2a5254e844" exitCode=143 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.792730 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68f67467f4-8bd8x" event={"ID":"45c8821a-baab-4e3c-8ffb-f4fe71722666","Type":"ContainerDied","Data":"98b35779e547ce4c6fa5ddd4466450c48cf21079fb3271d4d7874c2a5254e844"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.822757 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6/ovsdbserver-sb/0.log" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.823031 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.823980 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6","Type":"ContainerDied","Data":"44938311a6c954ceaa3fadd03c845344f2a50768d0efc651b5be5a5e7f8b8366"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.835366 4954 scope.go:117] "RemoveContainer" containerID="e156c21bec3f6d7422ff833d8e664020da9203178d2bf9a0025944bc8713896f" Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.894422 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.900131 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.902163 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="082f7647-b97a-4d57-83b1-ae69d61827b1" path="/var/lib/kubelet/pods/082f7647-b97a-4d57-83b1-ae69d61827b1/volumes" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.903265 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780" path="/var/lib/kubelet/pods/2c6ac4f8-3168-46d6-9adc-d6ea0b2f3780/volumes" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.903889 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ebb6843-0b32-449c-9b8f-d817c3690e67" 
path="/var/lib/kubelet/pods/2ebb6843-0b32-449c-9b8f-d817c3690e67/volumes" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.904711 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c07c750-8357-4ba2-a802-86a6f90ec496" path="/var/lib/kubelet/pods/9c07c750-8357-4ba2-a802-86a6f90ec496/volumes" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.905386 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e15090c7-7e49-4a9e-bad2-65482065e048" path="/var/lib/kubelet/pods/e15090c7-7e49-4a9e-bad2-65482065e048/volumes" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.906940 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-dr7bj"] Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.909705 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 16:36:45 crc kubenswrapper[4954]: E1128 16:36:45.909760 4954 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="8bff6e67-d9f4-4952-992d-1fa362d23a5c" containerName="ovn-northd" Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.926556 4954 generic.go:334] "Generic (PLEG): container finished" podID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerID="ec66bc366041d8589f9f9c599f24a864c0e0bb563e8e92223216a3166545a6a1" exitCode=0 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.926592 4954 generic.go:334] "Generic (PLEG): container finished" podID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerID="d6384cb554ea3695e28b1473c8d2c8cabce3a920e913bc674060bf33f2b8c9a0" exitCode=0 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.926601 4954 generic.go:334] "Generic (PLEG): container finished" podID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerID="e269517f2a950366081e2a4332d4c243079436ed72f1864b1a8bba98e71728ad" exitCode=0 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.926626 4954 generic.go:334] "Generic (PLEG): container finished" podID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerID="41ade0bb5410fb26da6929cbab355527e23f47061fb8a9b3b265c2dba9186585" exitCode=0 Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.926646 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"ec66bc366041d8589f9f9c599f24a864c0e0bb563e8e92223216a3166545a6a1"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.926673 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"d6384cb554ea3695e28b1473c8d2c8cabce3a920e913bc674060bf33f2b8c9a0"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.926684 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"e269517f2a950366081e2a4332d4c243079436ed72f1864b1a8bba98e71728ad"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.926696 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"41ade0bb5410fb26da6929cbab355527e23f47061fb8a9b3b265c2dba9186585"} Nov 28 16:36:45 crc kubenswrapper[4954]: I1128 16:36:45.976637 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-dr7bj"] Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.027712 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.048584 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.059828 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.071204 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.089827 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-55p57"] Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.103940 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-55p57"] Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.141336 4954 scope.go:117] "RemoveContainer" containerID="346a713c806456b0618fdd74822beca4c32ebb91c0170c66b810d1f4091e8caf" Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.168570 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="8a252e1a-c96a-4f98-b24e-b224fedf344c" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused" Nov 28 16:36:46 crc kubenswrapper[4954]: E1128 16:36:46.325200 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b45576e777726ad60fd11ec47af67513edf0fef268b3da28b29e6246985b5775 is running failed: container process not found" containerID="b45576e777726ad60fd11ec47af67513edf0fef268b3da28b29e6246985b5775" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 16:36:46 crc kubenswrapper[4954]: E1128 16:36:46.325987 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b45576e777726ad60fd11ec47af67513edf0fef268b3da28b29e6246985b5775 is running failed: container process not found" containerID="b45576e777726ad60fd11ec47af67513edf0fef268b3da28b29e6246985b5775" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 16:36:46 crc kubenswrapper[4954]: E1128 16:36:46.326320 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b45576e777726ad60fd11ec47af67513edf0fef268b3da28b29e6246985b5775 is running failed: container process not found" containerID="b45576e777726ad60fd11ec47af67513edf0fef268b3da28b29e6246985b5775" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 16:36:46 crc kubenswrapper[4954]: E1128 16:36:46.326351 4954 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b45576e777726ad60fd11ec47af67513edf0fef268b3da28b29e6246985b5775 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" 
podUID="43af6ac7-70c5-43ff-84a0-0f6b6159ae66" containerName="nova-cell1-conductor-conductor" Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.521588 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="ca81ef12-eb13-468e-81fc-0fdf6aba8830" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.103:5671: connect: connection refused" Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.772909 4954 scope.go:117] "RemoveContainer" containerID="a8575b0b135c0db7c127a5b6990ea8e36cac76374ed530e9480e0950abe2fca1" Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.864125 4954 scope.go:117] "RemoveContainer" containerID="0bc97c121bcab5b752053d9d791ed5b026bc09edb0e37b0c2ab456c187dcfed8" Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.919729 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.935173 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.968766 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.971498 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-vencrypt-tls-certs\") pod \"cf74e9b3-c272-47a3-bd81-1fae19e39236\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.971668 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkbdx\" (UniqueName: \"kubernetes.io/projected/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-kube-api-access-mkbdx\") pod \"43af6ac7-70c5-43ff-84a0-0f6b6159ae66\" (UID: \"43af6ac7-70c5-43ff-84a0-0f6b6159ae66\") " Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.971697 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-config-data\") pod \"cf74e9b3-c272-47a3-bd81-1fae19e39236\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.971713 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-nova-novncproxy-tls-certs\") pod \"cf74e9b3-c272-47a3-bd81-1fae19e39236\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.971779 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-combined-ca-bundle\") pod \"cf74e9b3-c272-47a3-bd81-1fae19e39236\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.971794 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9559p\" (UniqueName: \"kubernetes.io/projected/cf74e9b3-c272-47a3-bd81-1fae19e39236-kube-api-access-9559p\") pod \"cf74e9b3-c272-47a3-bd81-1fae19e39236\" (UID: \"cf74e9b3-c272-47a3-bd81-1fae19e39236\") " Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 
16:36:46.971819 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-config-data\") pod \"43af6ac7-70c5-43ff-84a0-0f6b6159ae66\" (UID: \"43af6ac7-70c5-43ff-84a0-0f6b6159ae66\") " Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.971899 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-combined-ca-bundle\") pod \"43af6ac7-70c5-43ff-84a0-0f6b6159ae66\" (UID: \"43af6ac7-70c5-43ff-84a0-0f6b6159ae66\") " Nov 28 16:36:46 crc kubenswrapper[4954]: E1128 16:36:46.972316 4954 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 16:36:46 crc kubenswrapper[4954]: E1128 16:36:46.972357 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data podName:8a252e1a-c96a-4f98-b24e-b224fedf344c nodeName:}" failed. No retries permitted until 2025-11-28 16:36:50.972344901 +0000 UTC m=+1564.364013442 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data") pod "rabbitmq-server-0" (UID: "8a252e1a-c96a-4f98-b24e-b224fedf344c") : configmap "rabbitmq-config-data" not found Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.979745 4954 generic.go:334] "Generic (PLEG): container finished" podID="0f8d636b-e07a-46b1-91b3-899a395e3ce5" containerID="a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b" exitCode=0 Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.979809 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.979877 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"0f8d636b-e07a-46b1-91b3-899a395e3ce5","Type":"ContainerDied","Data":"a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b"} Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.979918 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"0f8d636b-e07a-46b1-91b3-899a395e3ce5","Type":"ContainerDied","Data":"b1bcee61ae09916f73021f0a765fd39e01ffaa7ac8a9ebdc4c5b3e9311a83c72"} Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.988248 4954 generic.go:334] "Generic (PLEG): container finished" podID="70613221-3087-4dc3-9f41-86eb6fe88041" containerID="19d7c28fd7bc7d427f3a1148dfb20dae0ede2b943c17ee7da5ab666f31980fb2" exitCode=0 Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.988357 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" event={"ID":"70613221-3087-4dc3-9f41-86eb6fe88041","Type":"ContainerDied","Data":"19d7c28fd7bc7d427f3a1148dfb20dae0ede2b943c17ee7da5ab666f31980fb2"} Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.989220 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.995934 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf74e9b3-c272-47a3-bd81-1fae19e39236-kube-api-access-9559p" (OuterVolumeSpecName: "kube-api-access-9559p") pod "cf74e9b3-c272-47a3-bd81-1fae19e39236" (UID: "cf74e9b3-c272-47a3-bd81-1fae19e39236"). InnerVolumeSpecName "kube-api-access-9559p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.995999 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-kube-api-access-mkbdx" (OuterVolumeSpecName: "kube-api-access-mkbdx") pod "43af6ac7-70c5-43ff-84a0-0f6b6159ae66" (UID: "43af6ac7-70c5-43ff-84a0-0f6b6159ae66"). InnerVolumeSpecName "kube-api-access-mkbdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.996572 4954 generic.go:334] "Generic (PLEG): container finished" podID="c4439c3d-c90f-4b13-87a4-01c211cec875" containerID="e0e955ad82d7153e1ce49532c6e27790509f8ccd441ac5a41fe5281a1f7f3c3a" exitCode=0 Nov 28 16:36:46 crc kubenswrapper[4954]: I1128 16:36:46.996642 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c4439c3d-c90f-4b13-87a4-01c211cec875","Type":"ContainerDied","Data":"e0e955ad82d7153e1ce49532c6e27790509f8ccd441ac5a41fe5281a1f7f3c3a"} Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.007777 4954 scope.go:117] "RemoveContainer" containerID="107051ce6919142522558805719fa4abac53f52c0b68b1e403a6ef8355295c53" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.021822 4954 generic.go:334] "Generic (PLEG): container finished" podID="acf16584-ed76-41dc-955b-17e86a277627" containerID="4086bd643882fbb57adba19a323b9ec69c8f4e9d979fee602b638028e29a8143" exitCode=0 Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.021997 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glanceb261-account-delete-s574s" event={"ID":"acf16584-ed76-41dc-955b-17e86a277627","Type":"ContainerDied","Data":"4086bd643882fbb57adba19a323b9ec69c8f4e9d979fee602b638028e29a8143"} Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.059833 4954 generic.go:334] "Generic (PLEG): container finished" podID="1b7b0248-e1e6-40f5-9edb-0dbece5f12ae" containerID="5ee583633bf711261f9be82f4e55e1ba9821071c17a3ab4a989453611d8fafec" exitCode=0 Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.059925 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement4c07-account-delete-52zll" event={"ID":"1b7b0248-e1e6-40f5-9edb-0dbece5f12ae","Type":"ContainerDied","Data":"5ee583633bf711261f9be82f4e55e1ba9821071c17a3ab4a989453611d8fafec"} Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.060068 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.060432 4954 scope.go:117] "RemoveContainer" containerID="08f7109509c86b6eda033eb417ad90a9b3211cb608828eb97354929fe65260e4" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.065796 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron9335-account-delete-9jxvf" event={"ID":"762dc5ac-a8d3-4b91-806c-079e568530b1","Type":"ContainerStarted","Data":"aaf1262e16cb90a45fa7d090ebef8b947e7ea5d9b1f558ad8a157ccb67c6adbf"} Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.068806 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinderd6b0-account-delete-plvlb" event={"ID":"d887f697-ad36-44e6-8011-878d0a78b3bf","Type":"ContainerStarted","Data":"47da1476b395494d1e6db9fee186e2710c9b35ada234d738a95e66e6debe8dcf"} Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.069048 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.073786 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f8d636b-e07a-46b1-91b3-899a395e3ce5-combined-ca-bundle\") pod \"0f8d636b-e07a-46b1-91b3-899a395e3ce5\" (UID: \"0f8d636b-e07a-46b1-91b3-899a395e3ce5\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.073885 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjxqx\" (UniqueName: \"kubernetes.io/projected/2fb4ddd8-d914-431c-a39f-28a0c6b45354-kube-api-access-rjxqx\") pod \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.073906 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.073938 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2fb4ddd8-d914-431c-a39f-28a0c6b45354-config-data-generated\") pod \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.074003 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-operator-scripts\") pod \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.074021 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fb4ddd8-d914-431c-a39f-28a0c6b45354-galera-tls-certs\") pod \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.074045 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f8d636b-e07a-46b1-91b3-899a395e3ce5-config-data\") pod \"0f8d636b-e07a-46b1-91b3-899a395e3ce5\" (UID: \"0f8d636b-e07a-46b1-91b3-899a395e3ce5\") " 
Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.074075 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgv9x\" (UniqueName: \"kubernetes.io/projected/0f8d636b-e07a-46b1-91b3-899a395e3ce5-kube-api-access-zgv9x\") pod \"0f8d636b-e07a-46b1-91b3-899a395e3ce5\" (UID: \"0f8d636b-e07a-46b1-91b3-899a395e3ce5\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.074131 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-kolla-config\") pod \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.074155 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-config-data-default\") pod \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.074338 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fb4ddd8-d914-431c-a39f-28a0c6b45354-combined-ca-bundle\") pod \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\" (UID: \"2fb4ddd8-d914-431c-a39f-28a0c6b45354\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.074914 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkbdx\" (UniqueName: \"kubernetes.io/projected/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-kube-api-access-mkbdx\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.074928 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9559p\" (UniqueName: \"kubernetes.io/projected/cf74e9b3-c272-47a3-bd81-1fae19e39236-kube-api-access-9559p\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.084505 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f8d636b-e07a-46b1-91b3-899a395e3ce5-kube-api-access-zgv9x" (OuterVolumeSpecName: "kube-api-access-zgv9x") pod "0f8d636b-e07a-46b1-91b3-899a395e3ce5" (UID: "0f8d636b-e07a-46b1-91b3-899a395e3ce5"). InnerVolumeSpecName "kube-api-access-zgv9x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.086805 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0528b-account-delete-4cx2z" event={"ID":"af0c7199-9850-4553-b644-371c7e305443","Type":"ContainerStarted","Data":"1ec4857c3a2d180b112120227a1185915008e6ef80fac9e092c9f0ebc29bb655"} Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.086993 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.087671 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "2fb4ddd8-d914-431c-a39f-28a0c6b45354" (UID: "2fb4ddd8-d914-431c-a39f-28a0c6b45354"). InnerVolumeSpecName "kolla-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.088109 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fb4ddd8-d914-431c-a39f-28a0c6b45354-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "2fb4ddd8-d914-431c-a39f-28a0c6b45354" (UID: "2fb4ddd8-d914-431c-a39f-28a0c6b45354"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.088183 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "2fb4ddd8-d914-431c-a39f-28a0c6b45354" (UID: "2fb4ddd8-d914-431c-a39f-28a0c6b45354"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.089904 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2fb4ddd8-d914-431c-a39f-28a0c6b45354" (UID: "2fb4ddd8-d914-431c-a39f-28a0c6b45354"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.095377 4954 generic.go:334] "Generic (PLEG): container finished" podID="2fb4ddd8-d914-431c-a39f-28a0c6b45354" containerID="352627ac7579bc056309216860ed790605608d2615b1a1c40f0c28e291d87c99" exitCode=0 Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.095479 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2fb4ddd8-d914-431c-a39f-28a0c6b45354","Type":"ContainerDied","Data":"352627ac7579bc056309216860ed790605608d2615b1a1c40f0c28e291d87c99"} Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.095592 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.104778 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fb4ddd8-d914-431c-a39f-28a0c6b45354-kube-api-access-rjxqx" (OuterVolumeSpecName: "kube-api-access-rjxqx") pod "2fb4ddd8-d914-431c-a39f-28a0c6b45354" (UID: "2fb4ddd8-d914-431c-a39f-28a0c6b45354"). InnerVolumeSpecName "kube-api-access-rjxqx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.114028 4954 generic.go:334] "Generic (PLEG): container finished" podID="9023ad7d-6621-4ed8-aec4-bd1d0db53088" containerID="ad74a48962c94f92742b21cb97317608d199491c209ca0f9aba391fcde7835b3" exitCode=0 Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.114125 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5b74b79fdc-99274" event={"ID":"9023ad7d-6621-4ed8-aec4-bd1d0db53088","Type":"ContainerDied","Data":"ad74a48962c94f92742b21cb97317608d199491c209ca0f9aba391fcde7835b3"} Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.114215 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-5b74b79fdc-99274" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.120236 4954 scope.go:117] "RemoveContainer" containerID="a8434b10b60eb2a163aaaf4ff3727ed2fe9014b9cf717f4ce97fcc65af984b89" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.121057 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"43af6ac7-70c5-43ff-84a0-0f6b6159ae66","Type":"ContainerDied","Data":"5fcfd5dc09dbb15a96c3f82dc25914b5ddddbb38994aec53a9e84ffb97651a0b"} Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.121155 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.127709 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapif92b-account-delete-sj7n5" event={"ID":"1bc7ec75-0145-47f2-8193-28c8f17b572a","Type":"ContainerStarted","Data":"c705e87d6dab562e3079674833001b497d39ab9d92e83220fb67c4568a538703"} Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.138519 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"cf74e9b3-c272-47a3-bd81-1fae19e39236","Type":"ContainerDied","Data":"d0df4df86517cbd59da3f0bb803cc7c70ed2d25f268c0322a9b958411a1d31b3"} Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.138626 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.142050 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican3a2d-account-delete-cznv8" event={"ID":"6abe9424-aabe-4ae6-8032-79b92583d31d","Type":"ContainerStarted","Data":"975a0e0a8eecf7ee795a4e53cace889204a81ebfe3bfbd86dd25ed5d15f3112a"} Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.146864 4954 generic.go:334] "Generic (PLEG): container finished" podID="3022fb04-fc0f-44b1-9f97-3893aa4bdd68" containerID="6537c4c8cd8dd3fab3e5160d3fe8c38e05653b1a2fb44a373fda2650bb61ce42" exitCode=0 Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.147002 4954 generic.go:334] "Generic (PLEG): container finished" podID="3022fb04-fc0f-44b1-9f97-3893aa4bdd68" containerID="97b634ca797866be1255f1100d6abda787c78438108b953d4c36c125fda1b7b9" exitCode=0 Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.146908 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5fb676564c-cfds2" event={"ID":"3022fb04-fc0f-44b1-9f97-3893aa4bdd68","Type":"ContainerDied","Data":"6537c4c8cd8dd3fab3e5160d3fe8c38e05653b1a2fb44a373fda2650bb61ce42"} Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.147188 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5fb676564c-cfds2" event={"ID":"3022fb04-fc0f-44b1-9f97-3893aa4bdd68","Type":"ContainerDied","Data":"97b634ca797866be1255f1100d6abda787c78438108b953d4c36c125fda1b7b9"} Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.161593 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "mysql-db") pod "2fb4ddd8-d914-431c-a39f-28a0c6b45354" (UID: "2fb4ddd8-d914-431c-a39f-28a0c6b45354"). InnerVolumeSpecName "local-storage05-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.176750 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-config-data\") pod \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.176817 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4v8bx\" (UniqueName: \"kubernetes.io/projected/c4439c3d-c90f-4b13-87a4-01c211cec875-kube-api-access-4v8bx\") pod \"c4439c3d-c90f-4b13-87a4-01c211cec875\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.176837 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lclcp\" (UniqueName: \"kubernetes.io/projected/70613221-3087-4dc3-9f41-86eb6fe88041-kube-api-access-lclcp\") pod \"70613221-3087-4dc3-9f41-86eb6fe88041\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.176866 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-config-data\") pod \"70613221-3087-4dc3-9f41-86eb6fe88041\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.176884 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70613221-3087-4dc3-9f41-86eb6fe88041-logs\") pod \"70613221-3087-4dc3-9f41-86eb6fe88041\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.176903 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-scripts\") pod \"c4439c3d-c90f-4b13-87a4-01c211cec875\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.176934 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dl2j9\" (UniqueName: \"kubernetes.io/projected/9023ad7d-6621-4ed8-aec4-bd1d0db53088-kube-api-access-dl2j9\") pod \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.176989 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-combined-ca-bundle\") pod \"70613221-3087-4dc3-9f41-86eb6fe88041\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.177060 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-combined-ca-bundle\") pod \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.177134 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-config-data\") pod \"c4439c3d-c90f-4b13-87a4-01c211cec875\" (UID: 
\"c4439c3d-c90f-4b13-87a4-01c211cec875\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.177155 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-config-data-custom\") pod \"c4439c3d-c90f-4b13-87a4-01c211cec875\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.177204 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-combined-ca-bundle\") pod \"c4439c3d-c90f-4b13-87a4-01c211cec875\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.177328 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-config-data-custom\") pod \"70613221-3087-4dc3-9f41-86eb6fe88041\" (UID: \"70613221-3087-4dc3-9f41-86eb6fe88041\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.177364 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9023ad7d-6621-4ed8-aec4-bd1d0db53088-logs\") pod \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.177380 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-config-data-custom\") pod \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\" (UID: \"9023ad7d-6621-4ed8-aec4-bd1d0db53088\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.177397 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c4439c3d-c90f-4b13-87a4-01c211cec875-etc-machine-id\") pod \"c4439c3d-c90f-4b13-87a4-01c211cec875\" (UID: \"c4439c3d-c90f-4b13-87a4-01c211cec875\") " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.178739 4954 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.178769 4954 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2fb4ddd8-d914-431c-a39f-28a0c6b45354-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.178783 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.178797 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgv9x\" (UniqueName: \"kubernetes.io/projected/0f8d636b-e07a-46b1-91b3-899a395e3ce5-kube-api-access-zgv9x\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.178810 4954 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc 
kubenswrapper[4954]: I1128 16:36:47.178821 4954 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2fb4ddd8-d914-431c-a39f-28a0c6b45354-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.178832 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjxqx\" (UniqueName: \"kubernetes.io/projected/2fb4ddd8-d914-431c-a39f-28a0c6b45354-kube-api-access-rjxqx\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.186165 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9023ad7d-6621-4ed8-aec4-bd1d0db53088-logs" (OuterVolumeSpecName: "logs") pod "9023ad7d-6621-4ed8-aec4-bd1d0db53088" (UID: "9023ad7d-6621-4ed8-aec4-bd1d0db53088"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.198208 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c4439c3d-c90f-4b13-87a4-01c211cec875-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c4439c3d-c90f-4b13-87a4-01c211cec875" (UID: "c4439c3d-c90f-4b13-87a4-01c211cec875"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.198512 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70613221-3087-4dc3-9f41-86eb6fe88041-logs" (OuterVolumeSpecName: "logs") pod "70613221-3087-4dc3-9f41-86eb6fe88041" (UID: "70613221-3087-4dc3-9f41-86eb6fe88041"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.204360 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70613221-3087-4dc3-9f41-86eb6fe88041-kube-api-access-lclcp" (OuterVolumeSpecName: "kube-api-access-lclcp") pod "70613221-3087-4dc3-9f41-86eb6fe88041" (UID: "70613221-3087-4dc3-9f41-86eb6fe88041"). InnerVolumeSpecName "kube-api-access-lclcp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.206762 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4439c3d-c90f-4b13-87a4-01c211cec875-kube-api-access-4v8bx" (OuterVolumeSpecName: "kube-api-access-4v8bx") pod "c4439c3d-c90f-4b13-87a4-01c211cec875" (UID: "c4439c3d-c90f-4b13-87a4-01c211cec875"). InnerVolumeSpecName "kube-api-access-4v8bx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.218992 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-scripts" (OuterVolumeSpecName: "scripts") pod "c4439c3d-c90f-4b13-87a4-01c211cec875" (UID: "c4439c3d-c90f-4b13-87a4-01c211cec875"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.240150 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "70613221-3087-4dc3-9f41-86eb6fe88041" (UID: "70613221-3087-4dc3-9f41-86eb6fe88041"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.245672 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c4439c3d-c90f-4b13-87a4-01c211cec875" (UID: "c4439c3d-c90f-4b13-87a4-01c211cec875"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.245740 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9023ad7d-6621-4ed8-aec4-bd1d0db53088" (UID: "9023ad7d-6621-4ed8-aec4-bd1d0db53088"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.253144 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9023ad7d-6621-4ed8-aec4-bd1d0db53088-kube-api-access-dl2j9" (OuterVolumeSpecName: "kube-api-access-dl2j9") pod "9023ad7d-6621-4ed8-aec4-bd1d0db53088" (UID: "9023ad7d-6621-4ed8-aec4-bd1d0db53088"). InnerVolumeSpecName "kube-api-access-dl2j9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.263064 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cf74e9b3-c272-47a3-bd81-1fae19e39236" (UID: "cf74e9b3-c272-47a3-bd81-1fae19e39236"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.281764 4954 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.281797 4954 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.281806 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9023ad7d-6621-4ed8-aec4-bd1d0db53088-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.281814 4954 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.281823 4954 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c4439c3d-c90f-4b13-87a4-01c211cec875-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.281832 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.281840 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4v8bx\" (UniqueName: \"kubernetes.io/projected/c4439c3d-c90f-4b13-87a4-01c211cec875-kube-api-access-4v8bx\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.281850 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lclcp\" (UniqueName: \"kubernetes.io/projected/70613221-3087-4dc3-9f41-86eb6fe88041-kube-api-access-lclcp\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.281857 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/70613221-3087-4dc3-9f41-86eb6fe88041-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.281865 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.281875 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dl2j9\" (UniqueName: \"kubernetes.io/projected/9023ad7d-6621-4ed8-aec4-bd1d0db53088-kube-api-access-dl2j9\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.448440 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9023ad7d-6621-4ed8-aec4-bd1d0db53088" (UID: "9023ad7d-6621-4ed8-aec4-bd1d0db53088"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.485184 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.510215 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fb4ddd8-d914-431c-a39f-28a0c6b45354-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2fb4ddd8-d914-431c-a39f-28a0c6b45354" (UID: "2fb4ddd8-d914-431c-a39f-28a0c6b45354"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.587686 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fb4ddd8-d914-431c-a39f-28a0c6b45354-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.649786 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="7d8b87c9-4dd0-431c-a555-49141762763a" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.164:8776/healthcheck\": read tcp 10.217.0.2:33382->10.217.0.164:8776: read: connection reset by peer" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.658053 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "cf74e9b3-c272-47a3-bd81-1fae19e39236" (UID: "cf74e9b3-c272-47a3-bd81-1fae19e39236"). InnerVolumeSpecName "nova-novncproxy-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.668488 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-config-data" (OuterVolumeSpecName: "config-data") pod "cf74e9b3-c272-47a3-bd81-1fae19e39236" (UID: "cf74e9b3-c272-47a3-bd81-1fae19e39236"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.669376 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="642e0138-17d8-47e0-a67d-51a758291f7e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.202:8775/\": read tcp 10.217.0.2:41568->10.217.0.202:8775: read: connection reset by peer" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.669490 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="642e0138-17d8-47e0-a67d-51a758291f7e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.202:8775/\": read tcp 10.217.0.2:41552->10.217.0.202:8775: read: connection reset by peer" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.680870 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-68f67467f4-8bd8x" podUID="45c8821a-baab-4e3c-8ffb-f4fe71722666" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.159:9311/healthcheck\": read tcp 10.217.0.2:54764->10.217.0.159:9311: read: connection reset by peer" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.680925 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-68f67467f4-8bd8x" podUID="45c8821a-baab-4e3c-8ffb-f4fe71722666" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.159:9311/healthcheck\": read tcp 10.217.0.2:54756->10.217.0.159:9311: read: connection reset by peer" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.689664 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.689700 4954 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.690999 4954 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.693349 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f8d636b-e07a-46b1-91b3-899a395e3ce5-config-data" (OuterVolumeSpecName: "config-data") pod "0f8d636b-e07a-46b1-91b3-899a395e3ce5" (UID: "0f8d636b-e07a-46b1-91b3-899a395e3ce5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.707186 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-config-data" (OuterVolumeSpecName: "config-data") pod "43af6ac7-70c5-43ff-84a0-0f6b6159ae66" (UID: "43af6ac7-70c5-43ff-84a0-0f6b6159ae66"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.718973 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f8d636b-e07a-46b1-91b3-899a395e3ce5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0f8d636b-e07a-46b1-91b3-899a395e3ce5" (UID: "0f8d636b-e07a-46b1-91b3-899a395e3ce5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.723234 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "43af6ac7-70c5-43ff-84a0-0f6b6159ae66" (UID: "43af6ac7-70c5-43ff-84a0-0f6b6159ae66"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.725689 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "70613221-3087-4dc3-9f41-86eb6fe88041" (UID: "70613221-3087-4dc3-9f41-86eb6fe88041"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.795009 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f8d636b-e07a-46b1-91b3-899a395e3ce5-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.795039 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.795050 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f8d636b-e07a-46b1-91b3-899a395e3ce5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.795062 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43af6ac7-70c5-43ff-84a0-0f6b6159ae66-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.795074 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: I1128 16:36:47.795085 4954 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:47 crc kubenswrapper[4954]: E1128 16:36:47.906927 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="509442becd9f8224ea575b681dffcd00ba195d6a23b535824e28058798c86086" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:36:48 crc kubenswrapper[4954]: E1128 16:36:47.912729 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot 
register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="509442becd9f8224ea575b681dffcd00ba195d6a23b535824e28058798c86086" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:47.916060 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "cf74e9b3-c272-47a3-bd81-1fae19e39236" (UID: "cf74e9b3-c272-47a3-bd81-1fae19e39236"). InnerVolumeSpecName "vencrypt-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:47.924448 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-config-data" (OuterVolumeSpecName: "config-data") pod "70613221-3087-4dc3-9f41-86eb6fe88041" (UID: "70613221-3087-4dc3-9f41-86eb6fe88041"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:48 crc kubenswrapper[4954]: E1128 16:36:47.933077 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="509442becd9f8224ea575b681dffcd00ba195d6a23b535824e28058798c86086" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:36:48 crc kubenswrapper[4954]: E1128 16:36:47.933146 4954 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="1f8f01be-65c4-4a56-85e5-1b46ba5804ee" containerName="nova-scheduler-scheduler" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:47.966854 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-config-data" (OuterVolumeSpecName: "config-data") pod "9023ad7d-6621-4ed8-aec4-bd1d0db53088" (UID: "9023ad7d-6621-4ed8-aec4-bd1d0db53088"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:47.984729 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fb4ddd8-d914-431c-a39f-28a0c6b45354-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "2fb4ddd8-d914-431c-a39f-28a0c6b45354" (UID: "2fb4ddd8-d914-431c-a39f-28a0c6b45354"). InnerVolumeSpecName "galera-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.004239 4954 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf74e9b3-c272-47a3-bd81-1fae19e39236-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.004274 4954 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2fb4ddd8-d914-431c-a39f-28a0c6b45354-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.004289 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9023ad7d-6621-4ed8-aec4-bd1d0db53088-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.004300 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70613221-3087-4dc3-9f41-86eb6fe88041-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.081826 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c4439c3d-c90f-4b13-87a4-01c211cec875" (UID: "c4439c3d-c90f-4b13-87a4-01c211cec875"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.113463 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.152684 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-config-data" (OuterVolumeSpecName: "config-data") pod "c4439c3d-c90f-4b13-87a4-01c211cec875" (UID: "c4439c3d-c90f-4b13-87a4-01c211cec875"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.217885 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4439c3d-c90f-4b13-87a4-01c211cec875-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.279024 4954 generic.go:334] "Generic (PLEG): container finished" podID="642e0138-17d8-47e0-a67d-51a758291f7e" containerID="a90a636e9b99d45f8039d9676ed4cf0cbaf5b8af82689455191e468cfb83faa4" exitCode=0 Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.285063 4954 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="openstack/novaapif92b-account-delete-sj7n5" secret="" err="secret \"galera-openstack-dockercfg-5bnld\" not found" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.285467 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa" path="/var/lib/kubelet/pods/041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa/volumes" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.300359 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" path="/var/lib/kubelet/pods/1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6/volumes" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.301247 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52dbe85a-ccd3-4527-af8d-17ad9748d3c4" path="/var/lib/kubelet/pods/52dbe85a-ccd3-4527-af8d-17ad9748d3c4/volumes" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.302298 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="892117aa-d306-4a1d-bf6e-b203b6337537" path="/var/lib/kubelet/pods/892117aa-d306-4a1d-bf6e-b203b6337537/volumes" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.306022 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.306051 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"642e0138-17d8-47e0-a67d-51a758291f7e","Type":"ContainerDied","Data":"a90a636e9b99d45f8039d9676ed4cf0cbaf5b8af82689455191e468cfb83faa4"} Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.306075 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapif92b-account-delete-sj7n5" event={"ID":"1bc7ec75-0145-47f2-8193-28c8f17b572a","Type":"ContainerStarted","Data":"8abf93f87a5b33cbda36466498d343caaa5ecf640b48334af8af9fc2fef2dc20"} Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.306109 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.306354 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="9c381eb3-f466-40b7-a962-6782db85678c" containerName="kube-state-metrics" containerID="cri-o://722b392e14e483b0e2f1d174ef2d136d04550f8d796a74666ddf9f8253b0b8ec" gracePeriod=30 Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.320269 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="ceilometer-central-agent" containerID="cri-o://f3b3be1f06f6ac14c8e5f8001d2e6742a30e971699d14e2b691a416c75f5f699" gracePeriod=30 Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.328430 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="proxy-httpd" containerID="cri-o://eec27ebb646c765f0cde3b186b7adb59c251c51c9a65684667cfbc3c2314c2a3" gracePeriod=30 Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.328587 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="sg-core" containerID="cri-o://12e64b79d883c0960f7d869751df2054fc61218ac1ea6063c9a82354936f8e17" gracePeriod=30 Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.328852 4954 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openstack/ceilometer-0" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="ceilometer-notification-agent" containerID="cri-o://190064bf648d40792650ba482e7ac25d6d7c2fabc259baf1d0b1b45c5abca205" gracePeriod=30 Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.341693 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5fb676564c-cfds2" event={"ID":"3022fb04-fc0f-44b1-9f97-3893aa4bdd68","Type":"ContainerDied","Data":"47dde57efce82402d7d8343557f1b456ad132ea3ccd746b7fa93ff0c28c1f4c6"} Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.341735 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47dde57efce82402d7d8343557f1b456ad132ea3ccd746b7fa93ff0c28c1f4c6" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.343734 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.343926 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="afe4302a-413f-48e1-90a9-3f1178e5c6f7" containerName="memcached" containerID="cri-o://13ae9aa3934e30fe09e390d2eb8d342d20df09c16cb5c9d3cd643b0cba0bac5e" gracePeriod=30 Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.355864 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-s9vhw"] Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.363165 4954 generic.go:334] "Generic (PLEG): container finished" podID="d887f697-ad36-44e6-8011-878d0a78b3bf" containerID="1606ea4346525e54b196374f42bc0cc24f176a431b2adc531d6ed468b2112dea" exitCode=0 Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.363260 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinderd6b0-account-delete-plvlb" event={"ID":"d887f697-ad36-44e6-8011-878d0a78b3bf","Type":"ContainerDied","Data":"1606ea4346525e54b196374f42bc0cc24f176a431b2adc531d6ed468b2112dea"} Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.366110 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-ntrfz"] Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.380793 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican3a2d-account-delete-cznv8" event={"ID":"6abe9424-aabe-4ae6-8032-79b92583d31d","Type":"ContainerStarted","Data":"d6c73c953daac7374bd820d28af1e1380b668dc95777a7b94a7a994654c5848a"} Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.404415 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2fb4ddd8-d914-431c-a39f-28a0c6b45354","Type":"ContainerDied","Data":"970dfc526f56fecb685934a29eb459b08bf4eab23eab54c39b13b30034bbd8ed"} Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.405254 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-s9vhw"] Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.414671 4954 generic.go:334] "Generic (PLEG): container finished" podID="7d8b87c9-4dd0-431c-a555-49141762763a" containerID="786c11653ae6fc057189678b362be2013431f0eae8133efcb4e6222525e8f2c6" exitCode=0 Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.414778 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7d8b87c9-4dd0-431c-a555-49141762763a","Type":"ContainerDied","Data":"786c11653ae6fc057189678b362be2013431f0eae8133efcb4e6222525e8f2c6"} Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.415094 
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.415094 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-ntrfz"]
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.416926 4954 generic.go:334] "Generic (PLEG): container finished" podID="45c8821a-baab-4e3c-8ffb-f4fe71722666" containerID="2ef96eee105583bb4f5aff04b8ac6bdfa71044142eddd069aee2fd7df3cab725" exitCode=0
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.416983 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68f67467f4-8bd8x" event={"ID":"45c8821a-baab-4e3c-8ffb-f4fe71722666","Type":"ContainerDied","Data":"2ef96eee105583bb4f5aff04b8ac6bdfa71044142eddd069aee2fd7df3cab725"}
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.418570 4954 generic.go:334] "Generic (PLEG): container finished" podID="af0c7199-9850-4553-b644-371c7e305443" containerID="3e12a3d3b371177e96edcb8710ee4e74f64dc99510cabdd5643a86ea0b95b060" exitCode=0
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.418694 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0528b-account-delete-4cx2z" event={"ID":"af0c7199-9850-4553-b644-371c7e305443","Type":"ContainerDied","Data":"3e12a3d3b371177e96edcb8710ee4e74f64dc99510cabdd5643a86ea0b95b060"}
Nov 28 16:36:48 crc kubenswrapper[4954]: E1128 16:36:48.422718 4954 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 28 16:36:48 crc kubenswrapper[4954]: E1128 16:36:48.422785 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1bc7ec75-0145-47f2-8193-28c8f17b572a-operator-scripts podName:1bc7ec75-0145-47f2-8193-28c8f17b572a nodeName:}" failed. No retries permitted until 2025-11-28 16:36:48.922763451 +0000 UTC m=+1562.314432002 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1bc7ec75-0145-47f2-8193-28c8f17b572a-operator-scripts") pod "novaapif92b-account-delete-sj7n5" (UID: "1bc7ec75-0145-47f2-8193-28c8f17b572a") : configmap "openstack-scripts" not found
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.425170 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-5767fbd5b4-jmjzp"]
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.425568 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-5767fbd5b4-jmjzp" podUID="caa7b547-96ec-4119-87b1-fa14697ba9d1" containerName="keystone-api" containerID="cri-o://de3a57e387f1582f6570d0c5f5e659d725722d24b50d157c852c9079752bde22" gracePeriod=30
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.429439 4954 generic.go:334] "Generic (PLEG): container finished" podID="78bd4de4-5601-4771-b15c-c240e097519b" containerID="b9815c657f848312db17e79942f5eddf0b33ee714edd4d18a3accefb78385c5d" exitCode=0
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.429600 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-89b6465dd-zsqfd" event={"ID":"78bd4de4-5601-4771-b15c-c240e097519b","Type":"ContainerDied","Data":"b9815c657f848312db17e79942f5eddf0b33ee714edd4d18a3accefb78385c5d"}
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.438100 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"]
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.448000 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-bwl88"]
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.456537 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-bwl88"]
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.457709 4954 generic.go:334] "Generic (PLEG): container finished" podID="1c362e30-8109-411f-9f89-21c7c28da6c2" containerID="bf5f47ec2adaca701e2261bfe9e2ba603aeebd57c0748bfc08cf9e1207069dde" exitCode=0
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.457775 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1c362e30-8109-411f-9f89-21c7c28da6c2","Type":"ContainerDied","Data":"bf5f47ec2adaca701e2261bfe9e2ba603aeebd57c0748bfc08cf9e1207069dde"}
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.462034 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novaapif92b-account-delete-sj7n5" podStartSLOduration=6.46201407 podStartE2EDuration="6.46201407s" podCreationTimestamp="2025-11-28 16:36:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:36:48.320103353 +0000 UTC m=+1561.711771894" watchObservedRunningTime="2025-11-28 16:36:48.46201407 +0000 UTC m=+1561.853682611"
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.472643 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5b74b79fdc-99274" event={"ID":"9023ad7d-6621-4ed8-aec4-bd1d0db53088","Type":"ContainerDied","Data":"8455f37c6e73a768d424f4c43e7120c1cd2c6f075de8e2b4ab571f22475d3bf5"}
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.476880 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron9335-account-delete-9jxvf" event={"ID":"762dc5ac-a8d3-4b91-806c-079e568530b1","Type":"ContainerStarted","Data":"eba850d500c1b9eff48d4c4269433dc00620e092eabe63ce9cb2cc328b7bd894"}
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.497125 4954 generic.go:334] "Generic (PLEG): container finished" podID="7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" containerID="c22a271e9381f4b47783c1a7935315aa68fd7d56ae23e9e0515f0742f1172519" exitCode=0
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.497183 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5","Type":"ContainerDied","Data":"c22a271e9381f4b47783c1a7935315aa68fd7d56ae23e9e0515f0742f1172519"}
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.502565 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-80ff-account-create-update-b5gtr"]
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.515862 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-80ff-account-create-update-b5gtr"]
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.518466 4954 generic.go:334] "Generic (PLEG): container finished" podID="91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" containerID="0a58c23a2643ee01442026f1cd5c2931589d7ded1516de8835e9e562824e613e" exitCode=0
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.518539 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf","Type":"ContainerDied","Data":"0a58c23a2643ee01442026f1cd5c2931589d7ded1516de8835e9e562824e613e"}
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.518585 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf","Type":"ContainerDied","Data":"5f951126b4ad54dbec1f2f72816800f72e796ac41c13f2e702b000878cc7abc6"}
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.518600 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f951126b4ad54dbec1f2f72816800f72e796ac41c13f2e702b000878cc7abc6"
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.528994 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg" event={"ID":"70613221-3087-4dc3-9f41-86eb6fe88041","Type":"ContainerDied","Data":"40cc15a3ffab4757a656853fe0b94cc825ab38bc670397b184e94b66d46182cf"}
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.529119 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-64f4444cdb-4hdcg"
Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.552155 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c4439c3d-c90f-4b13-87a4-01c211cec875","Type":"ContainerDied","Data":"822046e6502bba3f5d237bf8d1ee0f710eb4ce9b4d9eb0e34a6234ad3191b3a9"}
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:36:48 crc kubenswrapper[4954]: I1128 16:36:48.738597 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="1d348de7-2e67-41df-8d59-4692491ea145" containerName="galera" containerID="cri-o://8532429bd8b897e8166cfc3585b91c29bc3335e178d6afa7ee092edcea1a6b07" gracePeriod=30 Nov 28 16:36:48 crc kubenswrapper[4954]: E1128 16:36:48.944119 4954 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 16:36:48 crc kubenswrapper[4954]: E1128 16:36:48.944573 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1bc7ec75-0145-47f2-8193-28c8f17b572a-operator-scripts podName:1bc7ec75-0145-47f2-8193-28c8f17b572a nodeName:}" failed. No retries permitted until 2025-11-28 16:36:49.944533223 +0000 UTC m=+1563.336201764 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1bc7ec75-0145-47f2-8193-28c8f17b572a-operator-scripts") pod "novaapif92b-account-delete-sj7n5" (UID: "1bc7ec75-0145-47f2-8193-28c8f17b572a") : configmap "openstack-scripts" not found Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.302213 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-cd5cbd7b9-dr7bj" podUID="52dbe85a-ccd3-4527-af8d-17ad9748d3c4" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.196:5353: i/o timeout" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.346874 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/memcached-0" podUID="afe4302a-413f-48e1-90a9-3f1178e5c6f7" containerName="memcached" probeResult="failure" output="dial tcp 10.217.0.106:11211: connect: connection refused" Nov 28 16:36:49 crc kubenswrapper[4954]: E1128 16:36:49.460621 4954 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:49 crc kubenswrapper[4954]: E1128 16:36:49.460701 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data podName:ca81ef12-eb13-468e-81fc-0fdf6aba8830 nodeName:}" failed. No retries permitted until 2025-11-28 16:36:57.460682287 +0000 UTC m=+1570.852350828 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data") pod "rabbitmq-cell1-server-0" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830") : configmap "rabbitmq-cell1-config-data" not found Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.555631 4954 scope.go:117] "RemoveContainer" containerID="a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.582285 4954 generic.go:334] "Generic (PLEG): container finished" podID="1f8f01be-65c4-4a56-85e5-1b46ba5804ee" containerID="509442becd9f8224ea575b681dffcd00ba195d6a23b535824e28058798c86086" exitCode=0 Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.582411 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"1f8f01be-65c4-4a56-85e5-1b46ba5804ee","Type":"ContainerDied","Data":"509442becd9f8224ea575b681dffcd00ba195d6a23b535824e28058798c86086"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.582447 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"1f8f01be-65c4-4a56-85e5-1b46ba5804ee","Type":"ContainerDied","Data":"5e51b63f07aa7be73cc0b5f92d7d06ac103b6c27879ac6f5b3d5ab3d20d8add6"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.582464 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e51b63f07aa7be73cc0b5f92d7d06ac103b6c27879ac6f5b3d5ab3d20d8add6" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.583125 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.587834 4954 generic.go:334] "Generic (PLEG): container finished" podID="7cef279d-a444-456d-8add-1998974a6e08" containerID="eec27ebb646c765f0cde3b186b7adb59c251c51c9a65684667cfbc3c2314c2a3" exitCode=0 Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.587860 4954 generic.go:334] "Generic (PLEG): container finished" podID="7cef279d-a444-456d-8add-1998974a6e08" containerID="12e64b79d883c0960f7d869751df2054fc61218ac1ea6063c9a82354936f8e17" exitCode=2 Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.587869 4954 generic.go:334] "Generic (PLEG): container finished" podID="7cef279d-a444-456d-8add-1998974a6e08" containerID="f3b3be1f06f6ac14c8e5f8001d2e6742a30e971699d14e2b691a416c75f5f699" exitCode=0 Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.587910 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7cef279d-a444-456d-8add-1998974a6e08","Type":"ContainerDied","Data":"eec27ebb646c765f0cde3b186b7adb59c251c51c9a65684667cfbc3c2314c2a3"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.587934 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7cef279d-a444-456d-8add-1998974a6e08","Type":"ContainerDied","Data":"12e64b79d883c0960f7d869751df2054fc61218ac1ea6063c9a82354936f8e17"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.587946 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7cef279d-a444-456d-8add-1998974a6e08","Type":"ContainerDied","Data":"f3b3be1f06f6ac14c8e5f8001d2e6742a30e971699d14e2b691a416c75f5f699"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.589465 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glanceb261-account-delete-s574s" 
event={"ID":"acf16584-ed76-41dc-955b-17e86a277627","Type":"ContainerDied","Data":"6742f7621ca6f85c0722c7e24d39b77c3f2dd38c9edd4f647d408d1485887e2c"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.589488 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6742f7621ca6f85c0722c7e24d39b77c3f2dd38c9edd4f647d408d1485887e2c" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.596647 4954 scope.go:117] "RemoveContainer" containerID="a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.596795 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"7d8b87c9-4dd0-431c-a555-49141762763a","Type":"ContainerDied","Data":"fcac9bad37dd256d23d145390f15de9e1ee2ff1d201714d9b00e4c6f7ebaca72"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.596826 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fcac9bad37dd256d23d145390f15de9e1ee2ff1d201714d9b00e4c6f7ebaca72" Nov 28 16:36:49 crc kubenswrapper[4954]: E1128 16:36:49.597896 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b\": container with ID starting with a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b not found: ID does not exist" containerID="a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.597936 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b"} err="failed to get container status \"a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b\": rpc error: code = NotFound desc = could not find container \"a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b\": container with ID starting with a8794425542f29accfe0774660c34d0ff41060061de3967fc9e3e89fe2773d8b not found: ID does not exist" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.597954 4954 scope.go:117] "RemoveContainer" containerID="352627ac7579bc056309216860ed790605608d2615b1a1c40f0c28e291d87c99" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.610477 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68f67467f4-8bd8x" event={"ID":"45c8821a-baab-4e3c-8ffb-f4fe71722666","Type":"ContainerDied","Data":"96352828906f018ee67c1437994daad7406a1d074cf14422caf6c2dade72a758"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.610527 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="96352828906f018ee67c1437994daad7406a1d074cf14422caf6c2dade72a758" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.616371 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1c362e30-8109-411f-9f89-21c7c28da6c2","Type":"ContainerDied","Data":"c8496aa73e00fffae11f764f6f8e07ac7bb4a380c7dd119a8dda5aedbb83d547"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.616407 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8496aa73e00fffae11f764f6f8e07ac7bb4a380c7dd119a8dda5aedbb83d547" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.616877 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-bdd54468f-gzf48" 
podUID="6ce804c9-edba-4404-9099-4c0f102aa1b2" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.158:9696/\": dial tcp 10.217.0.158:9696: connect: connection refused" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.617225 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.620166 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement4c07-account-delete-52zll" event={"ID":"1b7b0248-e1e6-40f5-9edb-0dbece5f12ae","Type":"ContainerDied","Data":"92b9091f07db66fb54ad9ed15806e06d63ea795ad367a682413404705add1fa7"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.620208 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="92b9091f07db66fb54ad9ed15806e06d63ea795ad367a682413404705add1fa7" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.637097 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"642e0138-17d8-47e0-a67d-51a758291f7e","Type":"ContainerDied","Data":"c8c37ba9170312790571f11b8cb1cfccb49ce1f56757e7a5b453681557696a42"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.637147 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8c37ba9170312790571f11b8cb1cfccb49ce1f56757e7a5b453681557696a42" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.639257 4954 generic.go:334] "Generic (PLEG): container finished" podID="1bc7ec75-0145-47f2-8193-28c8f17b572a" containerID="8abf93f87a5b33cbda36466498d343caaa5ecf640b48334af8af9fc2fef2dc20" exitCode=0 Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.639332 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapif92b-account-delete-sj7n5" event={"ID":"1bc7ec75-0145-47f2-8193-28c8f17b572a","Type":"ContainerDied","Data":"8abf93f87a5b33cbda36466498d343caaa5ecf640b48334af8af9fc2fef2dc20"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.651032 4954 generic.go:334] "Generic (PLEG): container finished" podID="762dc5ac-a8d3-4b91-806c-079e568530b1" containerID="eba850d500c1b9eff48d4c4269433dc00620e092eabe63ce9cb2cc328b7bd894" exitCode=0 Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.651097 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron9335-account-delete-9jxvf" event={"ID":"762dc5ac-a8d3-4b91-806c-079e568530b1","Type":"ContainerDied","Data":"eba850d500c1b9eff48d4c4269433dc00620e092eabe63ce9cb2cc328b7bd894"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.657580 4954 generic.go:334] "Generic (PLEG): container finished" podID="6abe9424-aabe-4ae6-8032-79b92583d31d" containerID="d6c73c953daac7374bd820d28af1e1380b668dc95777a7b94a7a994654c5848a" exitCode=0 Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.657628 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican3a2d-account-delete-cznv8" event={"ID":"6abe9424-aabe-4ae6-8032-79b92583d31d","Type":"ContainerDied","Data":"d6c73c953daac7374bd820d28af1e1380b668dc95777a7b94a7a994654c5848a"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.658296 4954 scope.go:117] "RemoveContainer" containerID="a96f76202555a162e9e08bfee3b503efcbf0bf4e6bb65e95750633f6a100b513" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.664304 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.670286 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-89b6465dd-zsqfd" event={"ID":"78bd4de4-5601-4771-b15c-c240e097519b","Type":"ContainerDied","Data":"88183e5b8a8e4859701caec0aed68ac6316a8e0641d681a574641c9b87e4d96f"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.670378 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88183e5b8a8e4859701caec0aed68ac6316a8e0641d681a574641c9b87e4d96f" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.672479 4954 generic.go:334] "Generic (PLEG): container finished" podID="9c381eb3-f466-40b7-a962-6782db85678c" containerID="722b392e14e483b0e2f1d174ef2d136d04550f8d796a74666ddf9f8253b0b8ec" exitCode=2 Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.672542 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9c381eb3-f466-40b7-a962-6782db85678c","Type":"ContainerDied","Data":"722b392e14e483b0e2f1d174ef2d136d04550f8d796a74666ddf9f8253b0b8ec"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.672581 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9c381eb3-f466-40b7-a962-6782db85678c","Type":"ContainerDied","Data":"ea4e61ca56864d9de459baad0f4c81082f30c2ef2d1fe46df68e1ceeee287427"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.672594 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ea4e61ca56864d9de459baad0f4c81082f30c2ef2d1fe46df68e1ceeee287427" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.674301 4954 generic.go:334] "Generic (PLEG): container finished" podID="afe4302a-413f-48e1-90a9-3f1178e5c6f7" containerID="13ae9aa3934e30fe09e390d2eb8d342d20df09c16cb5c9d3cd643b0cba0bac5e" exitCode=0 Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.674349 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"afe4302a-413f-48e1-90a9-3f1178e5c6f7","Type":"ContainerDied","Data":"13ae9aa3934e30fe09e390d2eb8d342d20df09c16cb5c9d3cd643b0cba0bac5e"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.675911 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.675857 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5","Type":"ContainerDied","Data":"1a2263f6f7b183effffaa5a53ff69918d73a88d7567a168ae22f5767d83d11b9"} Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.768279 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-internal-tls-certs\") pod \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.768327 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdglh\" (UniqueName: \"kubernetes.io/projected/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-kube-api-access-hdglh\") pod \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.768380 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-combined-ca-bundle\") pod \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.768580 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-etc-swift\") pod \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.768628 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.768654 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-log-httpd\") pod \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.768690 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-logs\") pod \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.768753 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-httpd-run\") pod \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.768792 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-public-tls-certs\") pod \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.768818 4954 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-run-httpd\") pod \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.768873 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-config-data\") pod \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.768910 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-public-tls-certs\") pod \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.768949 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwp25\" (UniqueName: \"kubernetes.io/projected/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-kube-api-access-xwp25\") pod \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.768990 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-public-tls-certs\") pod \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.769014 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jn5s6\" (UniqueName: \"kubernetes.io/projected/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-kube-api-access-jn5s6\") pod \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.769046 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-scripts\") pod \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.769083 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-logs\") pod \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.769123 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-config-data\") pod \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\" (UID: \"3022fb04-fc0f-44b1-9f97-3893aa4bdd68\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.769213 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-combined-ca-bundle\") pod \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.769244 4954 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-combined-ca-bundle\") pod \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.769288 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-internal-tls-certs\") pod \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.769316 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-config-data\") pod \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\" (UID: \"7cf746a5-7be5-4e2d-a7f8-18878e2a41a5\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.778089 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-logs" (OuterVolumeSpecName: "logs") pod "7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" (UID: "7cf746a5-7be5-4e2d-a7f8-18878e2a41a5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.781388 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3022fb04-fc0f-44b1-9f97-3893aa4bdd68" (UID: "3022fb04-fc0f-44b1-9f97-3893aa4bdd68"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.782671 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-logs" (OuterVolumeSpecName: "logs") pod "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" (UID: "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.786790 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3022fb04-fc0f-44b1-9f97-3893aa4bdd68" (UID: "3022fb04-fc0f-44b1-9f97-3893aa4bdd68"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.786920 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" (UID: "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.790461 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-kube-api-access-hdglh" (OuterVolumeSpecName: "kube-api-access-hdglh") pod "7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" (UID: "7cf746a5-7be5-4e2d-a7f8-18878e2a41a5"). InnerVolumeSpecName "kube-api-access-hdglh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.791357 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-kube-api-access-xwp25" (OuterVolumeSpecName: "kube-api-access-xwp25") pod "3022fb04-fc0f-44b1-9f97-3893aa4bdd68" (UID: "3022fb04-fc0f-44b1-9f97-3893aa4bdd68"). InnerVolumeSpecName "kube-api-access-xwp25". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.805668 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-scripts" (OuterVolumeSpecName: "scripts") pod "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" (UID: "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.809758 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" (UID: "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.823805 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "3022fb04-fc0f-44b1-9f97-3893aa4bdd68" (UID: "3022fb04-fc0f-44b1-9f97-3893aa4bdd68"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.826690 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-kube-api-access-jn5s6" (OuterVolumeSpecName: "kube-api-access-jn5s6") pod "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" (UID: "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf"). InnerVolumeSpecName "kube-api-access-jn5s6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.849176 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-config-data" (OuterVolumeSpecName: "config-data") pod "7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" (UID: "7cf746a5-7be5-4e2d-a7f8-18878e2a41a5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.872450 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" (UID: "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.880980 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="032f188e-b87a-49fc-838f-f024006df4df" path="/var/lib/kubelet/pods/032f188e-b87a-49fc-838f-f024006df4df/volumes" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.881681 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="597d1db1-e180-4bef-9c2c-d09998c23f2a" path="/var/lib/kubelet/pods/597d1db1-e180-4bef-9c2c-d09998c23f2a/volumes" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.882350 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c69ec0b-b98f-4181-99bb-7b7f29c20fa9" path="/var/lib/kubelet/pods/7c69ec0b-b98f-4181-99bb-7b7f29c20fa9/volumes" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.883076 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2" path="/var/lib/kubelet/pods/8ee953d6-2ebc-41aa-9751-d6b2c8eb2dd2/volumes" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.884072 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-combined-ca-bundle\") pod \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\" (UID: \"91dcac80-4f79-4fbb-81c5-4fd24cb69ccf\") " Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.885733 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwp25\" (UniqueName: \"kubernetes.io/projected/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-kube-api-access-xwp25\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.885760 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jn5s6\" (UniqueName: \"kubernetes.io/projected/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-kube-api-access-jn5s6\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.885771 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.885782 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.885792 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.885803 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdglh\" (UniqueName: \"kubernetes.io/projected/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-kube-api-access-hdglh\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.885815 4954 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.885840 4954 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 28 16:36:49 crc 
kubenswrapper[4954]: I1128 16:36:49.885851 4954 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.885860 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.885871 4954 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.885881 4954 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: W1128 16:36:49.886385 4954 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf/volumes/kubernetes.io~secret/combined-ca-bundle Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.886431 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" (UID: "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.891484 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.893978 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3022fb04-fc0f-44b1-9f97-3893aa4bdd68" (UID: "3022fb04-fc0f-44b1-9f97-3893aa4bdd68"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.903144 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.905333 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.915093 4954 scope.go:117] "RemoveContainer" containerID="ad74a48962c94f92742b21cb97317608d199491c209ca0f9aba391fcde7835b3" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.916190 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.917244 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.920706 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" (UID: "7cf746a5-7be5-4e2d-a7f8-18878e2a41a5"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.928487 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.930437 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-5b74b79fdc-99274"] Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.936638 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.942176 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-5b74b79fdc-99274"] Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.945976 4954 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.950137 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.957753 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" (UID: "7cf746a5-7be5-4e2d-a7f8-18878e2a41a5"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.962444 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.967057 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement4c07-account-delete-52zll" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.970880 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.971557 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" (UID: "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.972048 4954 scope.go:117] "RemoveContainer" containerID="d51ea12ac29bf1c9df355eb528d2411b3c64385297f631ec4383475fa281dbec" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.984018 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.989804 4954 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.989837 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.989849 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.989860 4954 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.989871 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.989882 4954 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:49 crc kubenswrapper[4954]: E1128 16:36:49.990809 4954 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 16:36:49 crc kubenswrapper[4954]: I1128 16:36:49.993650 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-64f4444cdb-4hdcg"] Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.007888 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-64f4444cdb-4hdcg"] Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.008266 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3022fb04-fc0f-44b1-9f97-3893aa4bdd68" (UID: "3022fb04-fc0f-44b1-9f97-3893aa4bdd68"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.008690 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glanceb261-account-delete-s574s" Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.010597 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1bc7ec75-0145-47f2-8193-28c8f17b572a-operator-scripts podName:1bc7ec75-0145-47f2-8193-28c8f17b572a nodeName:}" failed. No retries permitted until 2025-11-28 16:36:52.010561575 +0000 UTC m=+1565.402230116 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1bc7ec75-0145-47f2-8193-28c8f17b572a-operator-scripts") pod "novaapif92b-account-delete-sj7n5" (UID: "1bc7ec75-0145-47f2-8193-28c8f17b572a") : configmap "openstack-scripts" not found Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.021571 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.023804 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.026618 4954 scope.go:117] "RemoveContainer" containerID="b45576e777726ad60fd11ec47af67513edf0fef268b3da28b29e6246985b5775" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.033252 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.036185 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.045870 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.048261 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3022fb04-fc0f-44b1-9f97-3893aa4bdd68" (UID: "3022fb04-fc0f-44b1-9f97-3893aa4bdd68"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.061773 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" (UID: "7cf746a5-7be5-4e2d-a7f8-18878e2a41a5"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.064904 4954 scope.go:117] "RemoveContainer" containerID="029089c638fd3d3d0c67c5be6319770aaa300384c28cd15befea0e4ab7c84c99" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.069738 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-config-data" (OuterVolumeSpecName: "config-data") pod "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" (UID: "91dcac80-4f79-4fbb-81c5-4fd24cb69ccf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.072660 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-config-data" (OuterVolumeSpecName: "config-data") pod "3022fb04-fc0f-44b1-9f97-3893aa4bdd68" (UID: "3022fb04-fc0f-44b1-9f97-3893aa4bdd68"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.098131 4954 scope.go:117] "RemoveContainer" containerID="352627ac7579bc056309216860ed790605608d2615b1a1c40f0c28e291d87c99" Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.098981 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"352627ac7579bc056309216860ed790605608d2615b1a1c40f0c28e291d87c99\": container with ID starting with 352627ac7579bc056309216860ed790605608d2615b1a1c40f0c28e291d87c99 not found: ID does not exist" containerID="352627ac7579bc056309216860ed790605608d2615b1a1c40f0c28e291d87c99" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.099248 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"352627ac7579bc056309216860ed790605608d2615b1a1c40f0c28e291d87c99"} err="failed to get container status \"352627ac7579bc056309216860ed790605608d2615b1a1c40f0c28e291d87c99\": rpc error: code = NotFound desc = could not find container \"352627ac7579bc056309216860ed790605608d2615b1a1c40f0c28e291d87c99\": container with ID starting with 352627ac7579bc056309216860ed790605608d2615b1a1c40f0c28e291d87c99 not found: ID does not exist" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.099439 4954 scope.go:117] "RemoveContainer" containerID="a96f76202555a162e9e08bfee3b503efcbf0bf4e6bb65e95750633f6a100b513" Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.100527 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a96f76202555a162e9e08bfee3b503efcbf0bf4e6bb65e95750633f6a100b513\": container with ID starting with a96f76202555a162e9e08bfee3b503efcbf0bf4e6bb65e95750633f6a100b513 not found: ID does not exist" containerID="a96f76202555a162e9e08bfee3b503efcbf0bf4e6bb65e95750633f6a100b513" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.100628 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a96f76202555a162e9e08bfee3b503efcbf0bf4e6bb65e95750633f6a100b513"} err="failed to get container status \"a96f76202555a162e9e08bfee3b503efcbf0bf4e6bb65e95750633f6a100b513\": rpc error: code = NotFound desc = could not find container \"a96f76202555a162e9e08bfee3b503efcbf0bf4e6bb65e95750633f6a100b513\": container with ID starting with a96f76202555a162e9e08bfee3b503efcbf0bf4e6bb65e95750633f6a100b513 not found: ID does not exist" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.100668 4954 scope.go:117] "RemoveContainer" containerID="ad74a48962c94f92742b21cb97317608d199491c209ca0f9aba391fcde7835b3" Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.101080 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad74a48962c94f92742b21cb97317608d199491c209ca0f9aba391fcde7835b3\": container with ID starting with ad74a48962c94f92742b21cb97317608d199491c209ca0f9aba391fcde7835b3 not found: ID does not exist" containerID="ad74a48962c94f92742b21cb97317608d199491c209ca0f9aba391fcde7835b3" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.101104 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad74a48962c94f92742b21cb97317608d199491c209ca0f9aba391fcde7835b3"} err="failed to get container status \"ad74a48962c94f92742b21cb97317608d199491c209ca0f9aba391fcde7835b3\": rpc error: code = NotFound desc = could not 
find container \"ad74a48962c94f92742b21cb97317608d199491c209ca0f9aba391fcde7835b3\": container with ID starting with ad74a48962c94f92742b21cb97317608d199491c209ca0f9aba391fcde7835b3 not found: ID does not exist" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.101118 4954 scope.go:117] "RemoveContainer" containerID="d51ea12ac29bf1c9df355eb528d2411b3c64385297f631ec4383475fa281dbec" Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.101847 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d51ea12ac29bf1c9df355eb528d2411b3c64385297f631ec4383475fa281dbec\": container with ID starting with d51ea12ac29bf1c9df355eb528d2411b3c64385297f631ec4383475fa281dbec not found: ID does not exist" containerID="d51ea12ac29bf1c9df355eb528d2411b3c64385297f631ec4383475fa281dbec" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.101902 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d51ea12ac29bf1c9df355eb528d2411b3c64385297f631ec4383475fa281dbec"} err="failed to get container status \"d51ea12ac29bf1c9df355eb528d2411b3c64385297f631ec4383475fa281dbec\": rpc error: code = NotFound desc = could not find container \"d51ea12ac29bf1c9df355eb528d2411b3c64385297f631ec4383475fa281dbec\": container with ID starting with d51ea12ac29bf1c9df355eb528d2411b3c64385297f631ec4383475fa281dbec not found: ID does not exist" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.101932 4954 scope.go:117] "RemoveContainer" containerID="19d7c28fd7bc7d427f3a1148dfb20dae0ede2b943c17ee7da5ab666f31980fb2" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.106260 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/afe4302a-413f-48e1-90a9-3f1178e5c6f7-kolla-config\") pod \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.107653 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-config-data-custom\") pod \"7d8b87c9-4dd0-431c-a555-49141762763a\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.107879 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-public-tls-certs\") pod \"7d8b87c9-4dd0-431c-a555-49141762763a\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.107527 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afe4302a-413f-48e1-90a9-3f1178e5c6f7-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "afe4302a-413f-48e1-90a9-3f1178e5c6f7" (UID: "afe4302a-413f-48e1-90a9-3f1178e5c6f7"). InnerVolumeSpecName "kolla-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.107137 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.107165 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.108387 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kt64v\" (UniqueName: \"kubernetes.io/projected/78bd4de4-5601-4771-b15c-c240e097519b-kube-api-access-kt64v\") pod \"78bd4de4-5601-4771-b15c-c240e097519b\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.109712 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjstg\" (UniqueName: \"kubernetes.io/projected/1b7b0248-e1e6-40f5-9edb-0dbece5f12ae-kube-api-access-cjstg\") pod \"1b7b0248-e1e6-40f5-9edb-0dbece5f12ae\" (UID: \"1b7b0248-e1e6-40f5-9edb-0dbece5f12ae\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.109855 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/acf16584-ed76-41dc-955b-17e86a277627-operator-scripts\") pod \"acf16584-ed76-41dc-955b-17e86a277627\" (UID: \"acf16584-ed76-41dc-955b-17e86a277627\") " Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.109479 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.110905 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfvxt\" (UniqueName: \"kubernetes.io/projected/642e0138-17d8-47e0-a67d-51a758291f7e-kube-api-access-kfvxt\") pod \"642e0138-17d8-47e0-a67d-51a758291f7e\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.111042 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmwss\" (UniqueName: \"kubernetes.io/projected/1c362e30-8109-411f-9f89-21c7c28da6c2-kube-api-access-cmwss\") pod \"1c362e30-8109-411f-9f89-21c7c28da6c2\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.111186 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-config-data\") pod \"1f8f01be-65c4-4a56-85e5-1b46ba5804ee\" (UID: 
\"1f8f01be-65c4-4a56-85e5-1b46ba5804ee\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.111331 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-config-data\") pod \"7d8b87c9-4dd0-431c-a555-49141762763a\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.111512 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/78bd4de4-5601-4771-b15c-c240e097519b-logs\") pod \"78bd4de4-5601-4771-b15c-c240e097519b\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.111639 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c362e30-8109-411f-9f89-21c7c28da6c2-logs\") pod \"1c362e30-8109-411f-9f89-21c7c28da6c2\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.111883 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-internal-tls-certs\") pod \"1c362e30-8109-411f-9f89-21c7c28da6c2\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.112012 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bx4zq\" (UniqueName: \"kubernetes.io/projected/45c8821a-baab-4e3c-8ffb-f4fe71722666-kube-api-access-bx4zq\") pod \"45c8821a-baab-4e3c-8ffb-f4fe71722666\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.112157 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/642e0138-17d8-47e0-a67d-51a758291f7e-logs\") pod \"642e0138-17d8-47e0-a67d-51a758291f7e\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.112282 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7d8b87c9-4dd0-431c-a555-49141762763a-etc-machine-id\") pod \"7d8b87c9-4dd0-431c-a555-49141762763a\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.112436 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-internal-tls-certs\") pod \"7d8b87c9-4dd0-431c-a555-49141762763a\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.112570 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-combined-ca-bundle\") pod \"7d8b87c9-4dd0-431c-a555-49141762763a\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.112683 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"1c362e30-8109-411f-9f89-21c7c28da6c2\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 
16:36:50.112810 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7d8b87c9-4dd0-431c-a555-49141762763a-logs\") pod \"7d8b87c9-4dd0-431c-a555-49141762763a\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.112925 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b7b0248-e1e6-40f5-9edb-0dbece5f12ae-operator-scripts\") pod \"1b7b0248-e1e6-40f5-9edb-0dbece5f12ae\" (UID: \"1b7b0248-e1e6-40f5-9edb-0dbece5f12ae\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.110978 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/acf16584-ed76-41dc-955b-17e86a277627-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "acf16584-ed76-41dc-955b-17e86a277627" (UID: "acf16584-ed76-41dc-955b-17e86a277627"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.111697 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "7d8b87c9-4dd0-431c-a555-49141762763a" (UID: "7d8b87c9-4dd0-431c-a555-49141762763a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.112565 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c362e30-8109-411f-9f89-21c7c28da6c2-logs" (OuterVolumeSpecName: "logs") pod "1c362e30-8109-411f-9f89-21c7c28da6c2" (UID: "1c362e30-8109-411f-9f89-21c7c28da6c2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.112973 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78bd4de4-5601-4771-b15c-c240e097519b-logs" (OuterVolumeSpecName: "logs") pod "78bd4de4-5601-4771-b15c-c240e097519b" (UID: "78bd4de4-5601-4771-b15c-c240e097519b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.113084 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7d8b87c9-4dd0-431c-a555-49141762763a-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "7d8b87c9-4dd0-431c-a555-49141762763a" (UID: "7d8b87c9-4dd0-431c-a555-49141762763a"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.111763 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.113283 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-kube-state-metrics-tls-certs\") pod \"9c381eb3-f466-40b7-a962-6782db85678c\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.113392 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-combined-ca-bundle\") pod \"78bd4de4-5601-4771-b15c-c240e097519b\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.113515 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-combined-ca-bundle\") pod \"45c8821a-baab-4e3c-8ffb-f4fe71722666\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.113640 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-public-tls-certs\") pod \"45c8821a-baab-4e3c-8ffb-f4fe71722666\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.113885 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-kube-state-metrics-tls-config\") pod \"9c381eb3-f466-40b7-a962-6782db85678c\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.119102 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-internal-tls-certs\") pod \"45c8821a-baab-4e3c-8ffb-f4fe71722666\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.119317 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-config-data\") pod \"1c362e30-8109-411f-9f89-21c7c28da6c2\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.119475 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jbnrh\" (UniqueName: \"kubernetes.io/projected/afe4302a-413f-48e1-90a9-3f1178e5c6f7-kube-api-access-jbnrh\") pod \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.119621 4954 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-scripts\") pod \"7d8b87c9-4dd0-431c-a555-49141762763a\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.119940 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-combined-ca-bundle\") pod \"1f8f01be-65c4-4a56-85e5-1b46ba5804ee\" (UID: \"1f8f01be-65c4-4a56-85e5-1b46ba5804ee\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.120188 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-combined-ca-bundle\") pod \"642e0138-17d8-47e0-a67d-51a758291f7e\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.120303 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ld8zk\" (UniqueName: \"kubernetes.io/projected/9c381eb3-f466-40b7-a962-6782db85678c-kube-api-access-ld8zk\") pod \"9c381eb3-f466-40b7-a962-6782db85678c\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.120447 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-config-data\") pod \"78bd4de4-5601-4771-b15c-c240e097519b\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.120528 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-config-data-custom\") pod \"45c8821a-baab-4e3c-8ffb-f4fe71722666\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.120658 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-combined-ca-bundle\") pod \"9c381eb3-f466-40b7-a962-6782db85678c\" (UID: \"9c381eb3-f466-40b7-a962-6782db85678c\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.120775 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/afe4302a-413f-48e1-90a9-3f1178e5c6f7-config-data\") pod \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.120905 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/afe4302a-413f-48e1-90a9-3f1178e5c6f7-memcached-tls-certs\") pod \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.121039 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-combined-ca-bundle\") pod \"1c362e30-8109-411f-9f89-21c7c28da6c2\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.121158 4954 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-k8qrx\" (UniqueName: \"kubernetes.io/projected/7d8b87c9-4dd0-431c-a555-49141762763a-kube-api-access-k8qrx\") pod \"7d8b87c9-4dd0-431c-a555-49141762763a\" (UID: \"7d8b87c9-4dd0-431c-a555-49141762763a\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.121282 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afe4302a-413f-48e1-90a9-3f1178e5c6f7-combined-ca-bundle\") pod \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\" (UID: \"afe4302a-413f-48e1-90a9-3f1178e5c6f7\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.121378 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-config-data\") pod \"45c8821a-baab-4e3c-8ffb-f4fe71722666\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.121495 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skklp\" (UniqueName: \"kubernetes.io/projected/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-kube-api-access-skklp\") pod \"1f8f01be-65c4-4a56-85e5-1b46ba5804ee\" (UID: \"1f8f01be-65c4-4a56-85e5-1b46ba5804ee\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.121659 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-scripts\") pod \"1c362e30-8109-411f-9f89-21c7c28da6c2\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.121794 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-config-data\") pod \"642e0138-17d8-47e0-a67d-51a758291f7e\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.122050 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-scripts\") pod \"78bd4de4-5601-4771-b15c-c240e097519b\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.122505 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-nova-metadata-tls-certs\") pod \"642e0138-17d8-47e0-a67d-51a758291f7e\" (UID: \"642e0138-17d8-47e0-a67d-51a758291f7e\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.122665 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-public-tls-certs\") pod \"78bd4de4-5601-4771-b15c-c240e097519b\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.122819 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45c8821a-baab-4e3c-8ffb-f4fe71722666-logs\") pod \"45c8821a-baab-4e3c-8ffb-f4fe71722666\" (UID: \"45c8821a-baab-4e3c-8ffb-f4fe71722666\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.122939 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c362e30-8109-411f-9f89-21c7c28da6c2-httpd-run\") pod \"1c362e30-8109-411f-9f89-21c7c28da6c2\" (UID: \"1c362e30-8109-411f-9f89-21c7c28da6c2\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.123078 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vsjn\" (UniqueName: \"kubernetes.io/projected/acf16584-ed76-41dc-955b-17e86a277627-kube-api-access-2vsjn\") pod \"acf16584-ed76-41dc-955b-17e86a277627\" (UID: \"acf16584-ed76-41dc-955b-17e86a277627\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.123307 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-internal-tls-certs\") pod \"78bd4de4-5601-4771-b15c-c240e097519b\" (UID: \"78bd4de4-5601-4771-b15c-c240e097519b\") " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.125133 4954 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/afe4302a-413f-48e1-90a9-3f1178e5c6f7-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.125519 4954 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.125632 4954 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.125713 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.125792 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/acf16584-ed76-41dc-955b-17e86a277627-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.125871 4954 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.125947 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/78bd4de4-5601-4771-b15c-c240e097519b-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.126035 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1c362e30-8109-411f-9f89-21c7c28da6c2-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.126113 4954 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7d8b87c9-4dd0-431c-a555-49141762763a-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.126186 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-config-data\") on node 
\"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.126258 4954 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3022fb04-fc0f-44b1-9f97-3893aa4bdd68-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.122831 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.113234 4954 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-d4vfs" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovsdb-server" Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.146148 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.146232 4954 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-d4vfs" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovs-vswitchd" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.114601 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d8b87c9-4dd0-431c-a555-49141762763a-logs" (OuterVolumeSpecName: "logs") pod "7d8b87c9-4dd0-431c-a555-49141762763a" (UID: "7d8b87c9-4dd0-431c-a555-49141762763a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.125414 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/642e0138-17d8-47e0-a67d-51a758291f7e-kube-api-access-kfvxt" (OuterVolumeSpecName: "kube-api-access-kfvxt") pod "642e0138-17d8-47e0-a67d-51a758291f7e" (UID: "642e0138-17d8-47e0-a67d-51a758291f7e"). InnerVolumeSpecName "kube-api-access-kfvxt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.125582 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b7b0248-e1e6-40f5-9edb-0dbece5f12ae-kube-api-access-cjstg" (OuterVolumeSpecName: "kube-api-access-cjstg") pod "1b7b0248-e1e6-40f5-9edb-0dbece5f12ae" (UID: "1b7b0248-e1e6-40f5-9edb-0dbece5f12ae"). InnerVolumeSpecName "kube-api-access-cjstg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.134941 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45c8821a-baab-4e3c-8ffb-f4fe71722666-logs" (OuterVolumeSpecName: "logs") pod "45c8821a-baab-4e3c-8ffb-f4fe71722666" (UID: "45c8821a-baab-4e3c-8ffb-f4fe71722666"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.139447 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b7b0248-e1e6-40f5-9edb-0dbece5f12ae-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1b7b0248-e1e6-40f5-9edb-0dbece5f12ae" (UID: "1b7b0248-e1e6-40f5-9edb-0dbece5f12ae"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.139635 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78bd4de4-5601-4771-b15c-c240e097519b-kube-api-access-kt64v" (OuterVolumeSpecName: "kube-api-access-kt64v") pod "78bd4de4-5601-4771-b15c-c240e097519b" (UID: "78bd4de4-5601-4771-b15c-c240e097519b"). InnerVolumeSpecName "kube-api-access-kt64v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.141236 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c362e30-8109-411f-9f89-21c7c28da6c2-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "1c362e30-8109-411f-9f89-21c7c28da6c2" (UID: "1c362e30-8109-411f-9f89-21c7c28da6c2"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.144269 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/642e0138-17d8-47e0-a67d-51a758291f7e-logs" (OuterVolumeSpecName: "logs") pod "642e0138-17d8-47e0-a67d-51a758291f7e" (UID: "642e0138-17d8-47e0-a67d-51a758291f7e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.145305 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afe4302a-413f-48e1-90a9-3f1178e5c6f7-config-data" (OuterVolumeSpecName: "config-data") pod "afe4302a-413f-48e1-90a9-3f1178e5c6f7" (UID: "afe4302a-413f-48e1-90a9-3f1178e5c6f7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.158106 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c362e30-8109-411f-9f89-21c7c28da6c2-kube-api-access-cmwss" (OuterVolumeSpecName: "kube-api-access-cmwss") pod "1c362e30-8109-411f-9f89-21c7c28da6c2" (UID: "1c362e30-8109-411f-9f89-21c7c28da6c2"). InnerVolumeSpecName "kube-api-access-cmwss". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.163593 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45c8821a-baab-4e3c-8ffb-f4fe71722666-kube-api-access-bx4zq" (OuterVolumeSpecName: "kube-api-access-bx4zq") pod "45c8821a-baab-4e3c-8ffb-f4fe71722666" (UID: "45c8821a-baab-4e3c-8ffb-f4fe71722666"). InnerVolumeSpecName "kube-api-access-bx4zq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.166024 4954 scope.go:117] "RemoveContainer" containerID="bd0ca4ad41624f1010f9badf4ad0d9925862fa25503f6b1962dabd2c1181aa11" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.188834 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d8b87c9-4dd0-431c-a555-49141762763a-kube-api-access-k8qrx" (OuterVolumeSpecName: "kube-api-access-k8qrx") pod "7d8b87c9-4dd0-431c-a555-49141762763a" (UID: "7d8b87c9-4dd0-431c-a555-49141762763a"). InnerVolumeSpecName "kube-api-access-k8qrx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.188897 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-kube-api-access-skklp" (OuterVolumeSpecName: "kube-api-access-skklp") pod "1f8f01be-65c4-4a56-85e5-1b46ba5804ee" (UID: "1f8f01be-65c4-4a56-85e5-1b46ba5804ee"). InnerVolumeSpecName "kube-api-access-skklp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.191000 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acf16584-ed76-41dc-955b-17e86a277627-kube-api-access-2vsjn" (OuterVolumeSpecName: "kube-api-access-2vsjn") pod "acf16584-ed76-41dc-955b-17e86a277627" (UID: "acf16584-ed76-41dc-955b-17e86a277627"). InnerVolumeSpecName "kube-api-access-2vsjn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.191467 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c381eb3-f466-40b7-a962-6782db85678c-kube-api-access-ld8zk" (OuterVolumeSpecName: "kube-api-access-ld8zk") pod "9c381eb3-f466-40b7-a962-6782db85678c" (UID: "9c381eb3-f466-40b7-a962-6782db85678c"). InnerVolumeSpecName "kube-api-access-ld8zk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.192723 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-scripts" (OuterVolumeSpecName: "scripts") pod "1c362e30-8109-411f-9f89-21c7c28da6c2" (UID: "1c362e30-8109-411f-9f89-21c7c28da6c2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.193209 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-scripts" (OuterVolumeSpecName: "scripts") pod "78bd4de4-5601-4771-b15c-c240e097519b" (UID: "78bd4de4-5601-4771-b15c-c240e097519b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.193518 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "1c362e30-8109-411f-9f89-21c7c28da6c2" (UID: "1c362e30-8109-411f-9f89-21c7c28da6c2"). InnerVolumeSpecName "local-storage07-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.194537 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-scripts" (OuterVolumeSpecName: "scripts") pod "7d8b87c9-4dd0-431c-a555-49141762763a" (UID: "7d8b87c9-4dd0-431c-a555-49141762763a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.196905 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "45c8821a-baab-4e3c-8ffb-f4fe71722666" (UID: "45c8821a-baab-4e3c-8ffb-f4fe71722666"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.204772 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afe4302a-413f-48e1-90a9-3f1178e5c6f7-kube-api-access-jbnrh" (OuterVolumeSpecName: "kube-api-access-jbnrh") pod "afe4302a-413f-48e1-90a9-3f1178e5c6f7" (UID: "afe4302a-413f-48e1-90a9-3f1178e5c6f7"). InnerVolumeSpecName "kube-api-access-jbnrh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231230 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231262 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231274 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45c8821a-baab-4e3c-8ffb-f4fe71722666-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231289 4954 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1c362e30-8109-411f-9f89-21c7c28da6c2-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231302 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vsjn\" (UniqueName: \"kubernetes.io/projected/acf16584-ed76-41dc-955b-17e86a277627-kube-api-access-2vsjn\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231315 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kt64v\" (UniqueName: \"kubernetes.io/projected/78bd4de4-5601-4771-b15c-c240e097519b-kube-api-access-kt64v\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231326 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjstg\" (UniqueName: \"kubernetes.io/projected/1b7b0248-e1e6-40f5-9edb-0dbece5f12ae-kube-api-access-cjstg\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231339 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfvxt\" (UniqueName: \"kubernetes.io/projected/642e0138-17d8-47e0-a67d-51a758291f7e-kube-api-access-kfvxt\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc 
kubenswrapper[4954]: I1128 16:36:50.231351 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmwss\" (UniqueName: \"kubernetes.io/projected/1c362e30-8109-411f-9f89-21c7c28da6c2-kube-api-access-cmwss\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231362 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bx4zq\" (UniqueName: \"kubernetes.io/projected/45c8821a-baab-4e3c-8ffb-f4fe71722666-kube-api-access-bx4zq\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231373 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/642e0138-17d8-47e0-a67d-51a758291f7e-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231408 4954 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231422 4954 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7d8b87c9-4dd0-431c-a555-49141762763a-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231435 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b7b0248-e1e6-40f5-9edb-0dbece5f12ae-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231446 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231461 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jbnrh\" (UniqueName: \"kubernetes.io/projected/afe4302a-413f-48e1-90a9-3f1178e5c6f7-kube-api-access-jbnrh\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231474 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ld8zk\" (UniqueName: \"kubernetes.io/projected/9c381eb3-f466-40b7-a962-6782db85678c-kube-api-access-ld8zk\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231485 4954 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231496 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/afe4302a-413f-48e1-90a9-3f1178e5c6f7-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231509 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k8qrx\" (UniqueName: \"kubernetes.io/projected/7d8b87c9-4dd0-431c-a555-49141762763a-kube-api-access-k8qrx\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.231519 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skklp\" (UniqueName: \"kubernetes.io/projected/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-kube-api-access-skklp\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.259901 4954 
scope.go:117] "RemoveContainer" containerID="01194eaa4540cc51e694ffb69c1bc9bb2fcad75b283509d2f2d288ca98fa6388" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.331813 4954 scope.go:117] "RemoveContainer" containerID="e0e955ad82d7153e1ce49532c6e27790509f8ccd441ac5a41fe5281a1f7f3c3a" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.338614 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-config-data" (OuterVolumeSpecName: "config-data") pod "1f8f01be-65c4-4a56-85e5-1b46ba5804ee" (UID: "1f8f01be-65c4-4a56-85e5-1b46ba5804ee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.386858 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9c381eb3-f466-40b7-a962-6782db85678c" (UID: "9c381eb3-f466-40b7-a962-6782db85678c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.435218 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.435784 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.456811 4954 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.460635 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "45c8821a-baab-4e3c-8ffb-f4fe71722666" (UID: "45c8821a-baab-4e3c-8ffb-f4fe71722666"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.473128 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-config-data" (OuterVolumeSpecName: "config-data") pod "642e0138-17d8-47e0-a67d-51a758291f7e" (UID: "642e0138-17d8-47e0-a67d-51a758291f7e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.486235 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-vmnzt" podUID="e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" containerName="ovn-controller" probeResult="failure" output="command timed out" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.508788 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "9c381eb3-f466-40b7-a962-6782db85678c" (UID: "9c381eb3-f466-40b7-a962-6782db85678c"). InnerVolumeSpecName "kube-state-metrics-tls-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.514734 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "642e0138-17d8-47e0-a67d-51a758291f7e" (UID: "642e0138-17d8-47e0-a67d-51a758291f7e"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.515720 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "1c362e30-8109-411f-9f89-21c7c28da6c2" (UID: "1c362e30-8109-411f-9f89-21c7c28da6c2"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.529913 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afe4302a-413f-48e1-90a9-3f1178e5c6f7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "afe4302a-413f-48e1-90a9-3f1178e5c6f7" (UID: "afe4302a-413f-48e1-90a9-3f1178e5c6f7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.537511 4954 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.537557 4954 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.537566 4954 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.537579 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afe4302a-413f-48e1-90a9-3f1178e5c6f7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.537587 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.537595 4954 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.537605 4954 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.545462 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-combined-ca-bundle" (OuterVolumeSpecName: 
"combined-ca-bundle") pod "642e0138-17d8-47e0-a67d-51a758291f7e" (UID: "642e0138-17d8-47e0-a67d-51a758291f7e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.554805 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-vmnzt" podUID="e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" containerName="ovn-controller" probeResult="failure" output=< Nov 28 16:36:50 crc kubenswrapper[4954]: ERROR - Failed to get connection status from ovn-controller, ovn-appctl exit status: 0 Nov 28 16:36:50 crc kubenswrapper[4954]: > Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.566325 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7d8b87c9-4dd0-431c-a555-49141762763a" (UID: "7d8b87c9-4dd0-431c-a555-49141762763a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.606826 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "78bd4de4-5601-4771-b15c-c240e097519b" (UID: "78bd4de4-5601-4771-b15c-c240e097519b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.610366 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1c362e30-8109-411f-9f89-21c7c28da6c2" (UID: "1c362e30-8109-411f-9f89-21c7c28da6c2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.626113 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7d8b87c9-4dd0-431c-a555-49141762763a" (UID: "7d8b87c9-4dd0-431c-a555-49141762763a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.638740 4954 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.638775 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.638784 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.638796 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/642e0138-17d8-47e0-a67d-51a758291f7e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.638805 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.640326 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-config-data" (OuterVolumeSpecName: "config-data") pod "1c362e30-8109-411f-9f89-21c7c28da6c2" (UID: "1c362e30-8109-411f-9f89-21c7c28da6c2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.650772 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-config-data" (OuterVolumeSpecName: "config-data") pod "7d8b87c9-4dd0-431c-a555-49141762763a" (UID: "7d8b87c9-4dd0-431c-a555-49141762763a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.650880 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "45c8821a-baab-4e3c-8ffb-f4fe71722666" (UID: "45c8821a-baab-4e3c-8ffb-f4fe71722666"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.661731 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "7d8b87c9-4dd0-431c-a555-49141762763a" (UID: "7d8b87c9-4dd0-431c-a555-49141762763a"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.677430 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "9c381eb3-f466-40b7-a962-6782db85678c" (UID: "9c381eb3-f466-40b7-a962-6782db85678c"). 
InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.686981 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-config-data" (OuterVolumeSpecName: "config-data") pod "45c8821a-baab-4e3c-8ffb-f4fe71722666" (UID: "45c8821a-baab-4e3c-8ffb-f4fe71722666"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.695212 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican3a2d-account-delete-cznv8" event={"ID":"6abe9424-aabe-4ae6-8032-79b92583d31d","Type":"ContainerDied","Data":"975a0e0a8eecf7ee795a4e53cace889204a81ebfe3bfbd86dd25ed5d15f3112a"} Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.695268 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="975a0e0a8eecf7ee795a4e53cace889204a81ebfe3bfbd86dd25ed5d15f3112a" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.704768 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"afe4302a-413f-48e1-90a9-3f1178e5c6f7","Type":"ContainerDied","Data":"d87151f70a2a333463796e25a28edf09ad345c5c289f9c1ca91b7366806f6f6f"} Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.704798 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.704987 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1f8f01be-65c4-4a56-85e5-1b46ba5804ee" (UID: "1f8f01be-65c4-4a56-85e5-1b46ba5804ee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.718040 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0528b-account-delete-4cx2z" event={"ID":"af0c7199-9850-4553-b644-371c7e305443","Type":"ContainerDied","Data":"1ec4857c3a2d180b112120227a1185915008e6ef80fac9e092c9f0ebc29bb655"} Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.718122 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ec4857c3a2d180b112120227a1185915008e6ef80fac9e092c9f0ebc29bb655" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.719426 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afe4302a-413f-48e1-90a9-3f1178e5c6f7-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "afe4302a-413f-48e1-90a9-3f1178e5c6f7" (UID: "afe4302a-413f-48e1-90a9-3f1178e5c6f7"). InnerVolumeSpecName "memcached-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.720299 4954 generic.go:334] "Generic (PLEG): container finished" podID="ca81ef12-eb13-468e-81fc-0fdf6aba8830" containerID="dd5fdf435b0dbd1c11c86c7785b0da91c9296f8511aad91bbac2bb898f435f32" exitCode=0 Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.720366 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ca81ef12-eb13-468e-81fc-0fdf6aba8830","Type":"ContainerDied","Data":"dd5fdf435b0dbd1c11c86c7785b0da91c9296f8511aad91bbac2bb898f435f32"} Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.722816 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron9335-account-delete-9jxvf" event={"ID":"762dc5ac-a8d3-4b91-806c-079e568530b1","Type":"ContainerDied","Data":"aaf1262e16cb90a45fa7d090ebef8b947e7ea5d9b1f558ad8a157ccb67c6adbf"} Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.722840 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aaf1262e16cb90a45fa7d090ebef8b947e7ea5d9b1f558ad8a157ccb67c6adbf" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.723714 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "78bd4de4-5601-4771-b15c-c240e097519b" (UID: "78bd4de4-5601-4771-b15c-c240e097519b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.724175 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinderd6b0-account-delete-plvlb" event={"ID":"d887f697-ad36-44e6-8011-878d0a78b3bf","Type":"ContainerDied","Data":"47da1476b395494d1e6db9fee186e2710c9b35ada234d738a95e66e6debe8dcf"} Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.724198 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47da1476b395494d1e6db9fee186e2710c9b35ada234d738a95e66e6debe8dcf" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.727178 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.727756 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement4c07-account-delete-52zll" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.727806 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.727856 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68f67467f4-8bd8x" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.728409 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.729355 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5fb676564c-cfds2" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.730182 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.730609 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-89b6465dd-zsqfd" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.731281 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.733080 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glanceb261-account-delete-s574s" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.735156 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.750483 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.750526 4954 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.750744 4954 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.750772 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d8b87c9-4dd0-431c-a555-49141762763a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.750788 4954 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c381eb3-f466-40b7-a962-6782db85678c-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.750802 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.750819 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c362e30-8109-411f-9f89-21c7c28da6c2-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.750831 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f8f01be-65c4-4a56-85e5-1b46ba5804ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.750844 4954 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/afe4302a-413f-48e1-90a9-3f1178e5c6f7-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.776486 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-config-data" (OuterVolumeSpecName: "config-data") pod "78bd4de4-5601-4771-b15c-c240e097519b" (UID: 
"78bd4de4-5601-4771-b15c-c240e097519b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.852314 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.855511 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "45c8821a-baab-4e3c-8ffb-f4fe71722666" (UID: "45c8821a-baab-4e3c-8ffb-f4fe71722666"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.857859 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "78bd4de4-5601-4771-b15c-c240e097519b" (UID: "78bd4de4-5601-4771-b15c-c240e097519b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.927446 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.930407 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.942147 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 16:36:50 crc kubenswrapper[4954]: E1128 16:36:50.942227 4954 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="8bff6e67-d9f4-4952-992d-1fa362d23a5c" containerName="ovn-northd" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.954384 4954 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/45c8821a-baab-4e3c-8ffb-f4fe71722666-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:50 crc kubenswrapper[4954]: I1128 16:36:50.954497 4954 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/78bd4de4-5601-4771-b15c-c240e097519b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: E1128 16:36:51.057062 4954 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not 
found Nov 28 16:36:51 crc kubenswrapper[4954]: E1128 16:36:51.057149 4954 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data podName:8a252e1a-c96a-4f98-b24e-b224fedf344c nodeName:}" failed. No retries permitted until 2025-11-28 16:36:59.057135544 +0000 UTC m=+1572.448804085 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data") pod "rabbitmq-server-0" (UID: "8a252e1a-c96a-4f98-b24e-b224fedf344c") : configmap "rabbitmq-config-data" not found Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.177363 4954 scope.go:117] "RemoveContainer" containerID="c22a271e9381f4b47783c1a7935315aa68fd7d56ae23e9e0515f0742f1172519" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.197158 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron9335-account-delete-9jxvf" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.260807 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinderd6b0-account-delete-plvlb" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.262320 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kwtx7\" (UniqueName: \"kubernetes.io/projected/762dc5ac-a8d3-4b91-806c-079e568530b1-kube-api-access-kwtx7\") pod \"762dc5ac-a8d3-4b91-806c-079e568530b1\" (UID: \"762dc5ac-a8d3-4b91-806c-079e568530b1\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.262498 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/762dc5ac-a8d3-4b91-806c-079e568530b1-operator-scripts\") pod \"762dc5ac-a8d3-4b91-806c-079e568530b1\" (UID: \"762dc5ac-a8d3-4b91-806c-079e568530b1\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.264811 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/762dc5ac-a8d3-4b91-806c-079e568530b1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "762dc5ac-a8d3-4b91-806c-079e568530b1" (UID: "762dc5ac-a8d3-4b91-806c-079e568530b1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.270468 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/762dc5ac-a8d3-4b91-806c-079e568530b1-kube-api-access-kwtx7" (OuterVolumeSpecName: "kube-api-access-kwtx7") pod "762dc5ac-a8d3-4b91-806c-079e568530b1" (UID: "762dc5ac-a8d3-4b91-806c-079e568530b1"). InnerVolumeSpecName "kube-api-access-kwtx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.275694 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell0528b-account-delete-4cx2z" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.303982 4954 scope.go:117] "RemoveContainer" containerID="5a2d7806f5c1628a5fc711b36527a7d4ae8db75c650b99cdcc2d5020d8a8b6cd" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.364462 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d887f697-ad36-44e6-8011-878d0a78b3bf-operator-scripts\") pod \"d887f697-ad36-44e6-8011-878d0a78b3bf\" (UID: \"d887f697-ad36-44e6-8011-878d0a78b3bf\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.364527 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af0c7199-9850-4553-b644-371c7e305443-operator-scripts\") pod \"af0c7199-9850-4553-b644-371c7e305443\" (UID: \"af0c7199-9850-4553-b644-371c7e305443\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.364643 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95fn9\" (UniqueName: \"kubernetes.io/projected/d887f697-ad36-44e6-8011-878d0a78b3bf-kube-api-access-95fn9\") pod \"d887f697-ad36-44e6-8011-878d0a78b3bf\" (UID: \"d887f697-ad36-44e6-8011-878d0a78b3bf\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.364686 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7rlxx\" (UniqueName: \"kubernetes.io/projected/af0c7199-9850-4553-b644-371c7e305443-kube-api-access-7rlxx\") pod \"af0c7199-9850-4553-b644-371c7e305443\" (UID: \"af0c7199-9850-4553-b644-371c7e305443\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.365333 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af0c7199-9850-4553-b644-371c7e305443-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "af0c7199-9850-4553-b644-371c7e305443" (UID: "af0c7199-9850-4553-b644-371c7e305443"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.365666 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d887f697-ad36-44e6-8011-878d0a78b3bf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d887f697-ad36-44e6-8011-878d0a78b3bf" (UID: "d887f697-ad36-44e6-8011-878d0a78b3bf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.366217 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kwtx7\" (UniqueName: \"kubernetes.io/projected/762dc5ac-a8d3-4b91-806c-079e568530b1-kube-api-access-kwtx7\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.366237 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/762dc5ac-a8d3-4b91-806c-079e568530b1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.368973 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af0c7199-9850-4553-b644-371c7e305443-kube-api-access-7rlxx" (OuterVolumeSpecName: "kube-api-access-7rlxx") pod "af0c7199-9850-4553-b644-371c7e305443" (UID: "af0c7199-9850-4553-b644-371c7e305443"). 
InnerVolumeSpecName "kube-api-access-7rlxx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.369777 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d887f697-ad36-44e6-8011-878d0a78b3bf-kube-api-access-95fn9" (OuterVolumeSpecName: "kube-api-access-95fn9") pod "d887f697-ad36-44e6-8011-878d0a78b3bf" (UID: "d887f697-ad36-44e6-8011-878d0a78b3bf"). InnerVolumeSpecName "kube-api-access-95fn9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.467818 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95fn9\" (UniqueName: \"kubernetes.io/projected/d887f697-ad36-44e6-8011-878d0a78b3bf-kube-api-access-95fn9\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.467903 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7rlxx\" (UniqueName: \"kubernetes.io/projected/af0c7199-9850-4553-b644-371c7e305443-kube-api-access-7rlxx\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.467914 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d887f697-ad36-44e6-8011-878d0a78b3bf-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.467922 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af0c7199-9850-4553-b644-371c7e305443-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.476634 4954 scope.go:117] "RemoveContainer" containerID="13ae9aa3934e30fe09e390d2eb8d342d20df09c16cb5c9d3cd643b0cba0bac5e" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.495419 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican3a2d-account-delete-cznv8" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.516447 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.517363 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.519256 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novaapif92b-account-delete-sj7n5" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.534115 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.546903 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.550087 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.568070 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.568769 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ca81ef12-eb13-468e-81fc-0fdf6aba8830-erlang-cookie-secret\") pod \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.568812 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ca81ef12-eb13-468e-81fc-0fdf6aba8830-pod-info\") pod \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.568897 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data\") pod \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.568914 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-kolla-config\") pod \"1d348de7-2e67-41df-8d59-4692491ea145\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.568934 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1bc7ec75-0145-47f2-8193-28c8f17b572a-operator-scripts\") pod \"1bc7ec75-0145-47f2-8193-28c8f17b572a\" (UID: \"1bc7ec75-0145-47f2-8193-28c8f17b572a\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.568954 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-plugins-conf\") pod \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.568980 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-plugins\") pod \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569011 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvkpb\" (UniqueName: \"kubernetes.io/projected/6abe9424-aabe-4ae6-8032-79b92583d31d-kube-api-access-gvkpb\") pod \"6abe9424-aabe-4ae6-8032-79b92583d31d\" (UID: \"6abe9424-aabe-4ae6-8032-79b92583d31d\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569037 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d348de7-2e67-41df-8d59-4692491ea145-galera-tls-certs\") pod \"1d348de7-2e67-41df-8d59-4692491ea145\" (UID: 
\"1d348de7-2e67-41df-8d59-4692491ea145\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569053 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-tls\") pod \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569070 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25lbv\" (UniqueName: \"kubernetes.io/projected/1d348de7-2e67-41df-8d59-4692491ea145-kube-api-access-25lbv\") pod \"1d348de7-2e67-41df-8d59-4692491ea145\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569089 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqhwh\" (UniqueName: \"kubernetes.io/projected/1bc7ec75-0145-47f2-8193-28c8f17b572a-kube-api-access-xqhwh\") pod \"1bc7ec75-0145-47f2-8193-28c8f17b572a\" (UID: \"1bc7ec75-0145-47f2-8193-28c8f17b572a\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569106 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"1d348de7-2e67-41df-8d59-4692491ea145\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569125 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-erlang-cookie\") pod \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569151 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569176 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1d348de7-2e67-41df-8d59-4692491ea145-config-data-generated\") pod \"1d348de7-2e67-41df-8d59-4692491ea145\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569195 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-operator-scripts\") pod \"1d348de7-2e67-41df-8d59-4692491ea145\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569250 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7g7mp\" (UniqueName: \"kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-kube-api-access-7g7mp\") pod \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569288 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-server-conf\") pod \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\" (UID: 
\"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569313 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6abe9424-aabe-4ae6-8032-79b92583d31d-operator-scripts\") pod \"6abe9424-aabe-4ae6-8032-79b92583d31d\" (UID: \"6abe9424-aabe-4ae6-8032-79b92583d31d\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569333 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-config-data-default\") pod \"1d348de7-2e67-41df-8d59-4692491ea145\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569362 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-confd\") pod \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\" (UID: \"ca81ef12-eb13-468e-81fc-0fdf6aba8830\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.569387 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d348de7-2e67-41df-8d59-4692491ea145-combined-ca-bundle\") pod \"1d348de7-2e67-41df-8d59-4692491ea145\" (UID: \"1d348de7-2e67-41df-8d59-4692491ea145\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.574449 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "ca81ef12-eb13-468e-81fc-0fdf6aba8830" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.575172 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1d348de7-2e67-41df-8d59-4692491ea145" (UID: "1d348de7-2e67-41df-8d59-4692491ea145"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.575955 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bc7ec75-0145-47f2-8193-28c8f17b572a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1bc7ec75-0145-47f2-8193-28c8f17b572a" (UID: "1bc7ec75-0145-47f2-8193-28c8f17b572a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.577623 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d348de7-2e67-41df-8d59-4692491ea145-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "1d348de7-2e67-41df-8d59-4692491ea145" (UID: "1d348de7-2e67-41df-8d59-4692491ea145"). InnerVolumeSpecName "config-data-generated". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.578421 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6abe9424-aabe-4ae6-8032-79b92583d31d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6abe9424-aabe-4ae6-8032-79b92583d31d" (UID: "6abe9424-aabe-4ae6-8032-79b92583d31d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.581293 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "1d348de7-2e67-41df-8d59-4692491ea145" (UID: "1d348de7-2e67-41df-8d59-4692491ea145"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.582388 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d348de7-2e67-41df-8d59-4692491ea145-kube-api-access-25lbv" (OuterVolumeSpecName: "kube-api-access-25lbv") pod "1d348de7-2e67-41df-8d59-4692491ea145" (UID: "1d348de7-2e67-41df-8d59-4692491ea145"). InnerVolumeSpecName "kube-api-access-25lbv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.582957 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bc7ec75-0145-47f2-8193-28c8f17b572a-kube-api-access-xqhwh" (OuterVolumeSpecName: "kube-api-access-xqhwh") pod "1bc7ec75-0145-47f2-8193-28c8f17b572a" (UID: "1bc7ec75-0145-47f2-8193-28c8f17b572a"). InnerVolumeSpecName "kube-api-access-xqhwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.583065 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-kube-api-access-7g7mp" (OuterVolumeSpecName: "kube-api-access-7g7mp") pod "ca81ef12-eb13-468e-81fc-0fdf6aba8830" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830"). InnerVolumeSpecName "kube-api-access-7g7mp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.584108 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "ca81ef12-eb13-468e-81fc-0fdf6aba8830" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.584245 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "ca81ef12-eb13-468e-81fc-0fdf6aba8830" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.584738 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca81ef12-eb13-468e-81fc-0fdf6aba8830-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "ca81ef12-eb13-468e-81fc-0fdf6aba8830" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830"). 
InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.584817 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "ca81ef12-eb13-468e-81fc-0fdf6aba8830" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.585100 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_8bff6e67-d9f4-4952-992d-1fa362d23a5c/ovn-northd/0.log" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.585171 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.587753 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "1d348de7-2e67-41df-8d59-4692491ea145" (UID: "1d348de7-2e67-41df-8d59-4692491ea145"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.590803 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6abe9424-aabe-4ae6-8032-79b92583d31d-kube-api-access-gvkpb" (OuterVolumeSpecName: "kube-api-access-gvkpb") pod "6abe9424-aabe-4ae6-8032-79b92583d31d" (UID: "6abe9424-aabe-4ae6-8032-79b92583d31d"). InnerVolumeSpecName "kube-api-access-gvkpb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.595776 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "ca81ef12-eb13-468e-81fc-0fdf6aba8830" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.603842 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/ca81ef12-eb13-468e-81fc-0fdf6aba8830-pod-info" (OuterVolumeSpecName: "pod-info") pod "ca81ef12-eb13-468e-81fc-0fdf6aba8830" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.605417 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "mysql-db") pod "1d348de7-2e67-41df-8d59-4692491ea145" (UID: "1d348de7-2e67-41df-8d59-4692491ea145"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.621151 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d348de7-2e67-41df-8d59-4692491ea145-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1d348de7-2e67-41df-8d59-4692491ea145" (UID: "1d348de7-2e67-41df-8d59-4692491ea145"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.629243 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.631594 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data" (OuterVolumeSpecName: "config-data") pod "ca81ef12-eb13-468e-81fc-0fdf6aba8830" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.641768 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.647801 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d348de7-2e67-41df-8d59-4692491ea145-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "1d348de7-2e67-41df-8d59-4692491ea145" (UID: "1d348de7-2e67-41df-8d59-4692491ea145"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.650394 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.660285 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-68f67467f4-8bd8x"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.661265 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-server-conf" (OuterVolumeSpecName: "server-conf") pod "ca81ef12-eb13-468e-81fc-0fdf6aba8830" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.670653 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bff6e67-d9f4-4952-992d-1fa362d23a5c-config\") pod \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.670705 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-plugins-conf\") pod \"8a252e1a-c96a-4f98-b24e-b224fedf344c\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.670757 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-plugins\") pod \"8a252e1a-c96a-4f98-b24e-b224fedf344c\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.670797 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vkddb\" (UniqueName: \"kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-kube-api-access-vkddb\") pod \"8a252e1a-c96a-4f98-b24e-b224fedf344c\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.670824 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8a252e1a-c96a-4f98-b24e-b224fedf344c-erlang-cookie-secret\") pod \"8a252e1a-c96a-4f98-b24e-b224fedf344c\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.670880 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8a252e1a-c96a-4f98-b24e-b224fedf344c-pod-info\") pod \"8a252e1a-c96a-4f98-b24e-b224fedf344c\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.670928 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-metrics-certs-tls-certs\") pod \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.670974 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data\") pod \"8a252e1a-c96a-4f98-b24e-b224fedf344c\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671002 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"8a252e1a-c96a-4f98-b24e-b224fedf344c\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671027 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bfwwp\" (UniqueName: \"kubernetes.io/projected/8bff6e67-d9f4-4952-992d-1fa362d23a5c-kube-api-access-bfwwp\") pod \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\" (UID: 
\"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671053 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-server-conf\") pod \"8a252e1a-c96a-4f98-b24e-b224fedf344c\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671084 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-erlang-cookie\") pod \"8a252e1a-c96a-4f98-b24e-b224fedf344c\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671138 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-ovn-northd-tls-certs\") pod \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671190 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8bff6e67-d9f4-4952-992d-1fa362d23a5c-scripts\") pod \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671214 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-tls\") pod \"8a252e1a-c96a-4f98-b24e-b224fedf344c\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671256 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-confd\") pod \"8a252e1a-c96a-4f98-b24e-b224fedf344c\" (UID: \"8a252e1a-c96a-4f98-b24e-b224fedf344c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671281 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-combined-ca-bundle\") pod \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671329 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8bff6e67-d9f4-4952-992d-1fa362d23a5c-ovn-rundir\") pod \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\" (UID: \"8bff6e67-d9f4-4952-992d-1fa362d23a5c\") " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671701 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7g7mp\" (UniqueName: \"kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-kube-api-access-7g7mp\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671717 4954 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671729 4954 reconciler_common.go:293] "Volume detached for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6abe9424-aabe-4ae6-8032-79b92583d31d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671742 4954 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671752 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d348de7-2e67-41df-8d59-4692491ea145-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671763 4954 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ca81ef12-eb13-468e-81fc-0fdf6aba8830-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671774 4954 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ca81ef12-eb13-468e-81fc-0fdf6aba8830-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671786 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671798 4954 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671812 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1bc7ec75-0145-47f2-8193-28c8f17b572a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671824 4954 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ca81ef12-eb13-468e-81fc-0fdf6aba8830-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671833 4954 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671847 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvkpb\" (UniqueName: \"kubernetes.io/projected/6abe9424-aabe-4ae6-8032-79b92583d31d-kube-api-access-gvkpb\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671859 4954 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d348de7-2e67-41df-8d59-4692491ea145-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671869 4954 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671879 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25lbv\" (UniqueName: 
\"kubernetes.io/projected/1d348de7-2e67-41df-8d59-4692491ea145-kube-api-access-25lbv\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671890 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqhwh\" (UniqueName: \"kubernetes.io/projected/1bc7ec75-0145-47f2-8193-28c8f17b572a-kube-api-access-xqhwh\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671912 4954 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671923 4954 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671940 4954 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671951 4954 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1d348de7-2e67-41df-8d59-4692491ea145-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.671962 4954 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d348de7-2e67-41df-8d59-4692491ea145-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.673063 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bff6e67-d9f4-4952-992d-1fa362d23a5c-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "8bff6e67-d9f4-4952-992d-1fa362d23a5c" (UID: "8bff6e67-d9f4-4952-992d-1fa362d23a5c"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.673594 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "8a252e1a-c96a-4f98-b24e-b224fedf344c" (UID: "8a252e1a-c96a-4f98-b24e-b224fedf344c"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.673723 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bff6e67-d9f4-4952-992d-1fa362d23a5c-config" (OuterVolumeSpecName: "config") pod "8bff6e67-d9f4-4952-992d-1fa362d23a5c" (UID: "8bff6e67-d9f4-4952-992d-1fa362d23a5c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.675379 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "8a252e1a-c96a-4f98-b24e-b224fedf344c" (UID: "8a252e1a-c96a-4f98-b24e-b224fedf344c"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.675813 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "8a252e1a-c96a-4f98-b24e-b224fedf344c" (UID: "8a252e1a-c96a-4f98-b24e-b224fedf344c"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.676430 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bff6e67-d9f4-4952-992d-1fa362d23a5c-scripts" (OuterVolumeSpecName: "scripts") pod "8bff6e67-d9f4-4952-992d-1fa362d23a5c" (UID: "8bff6e67-d9f4-4952-992d-1fa362d23a5c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.678976 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-68f67467f4-8bd8x"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.685476 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a252e1a-c96a-4f98-b24e-b224fedf344c-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "8a252e1a-c96a-4f98-b24e-b224fedf344c" (UID: "8a252e1a-c96a-4f98-b24e-b224fedf344c"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.689217 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/8a252e1a-c96a-4f98-b24e-b224fedf344c-pod-info" (OuterVolumeSpecName: "pod-info") pod "8a252e1a-c96a-4f98-b24e-b224fedf344c" (UID: "8a252e1a-c96a-4f98-b24e-b224fedf344c"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.696796 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-89b6465dd-zsqfd"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.702210 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-kube-api-access-vkddb" (OuterVolumeSpecName: "kube-api-access-vkddb") pod "8a252e1a-c96a-4f98-b24e-b224fedf344c" (UID: "8a252e1a-c96a-4f98-b24e-b224fedf344c"). InnerVolumeSpecName "kube-api-access-vkddb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.702258 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "8a252e1a-c96a-4f98-b24e-b224fedf344c" (UID: "8a252e1a-c96a-4f98-b24e-b224fedf344c"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.704357 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "8a252e1a-c96a-4f98-b24e-b224fedf344c" (UID: "8a252e1a-c96a-4f98-b24e-b224fedf344c"). InnerVolumeSpecName "local-storage02-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.704675 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "ca81ef12-eb13-468e-81fc-0fdf6aba8830" (UID: "ca81ef12-eb13-468e-81fc-0fdf6aba8830"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: E1128 16:36:51.718423 4954 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 28 16:36:51 crc kubenswrapper[4954]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-28T16:36:44Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 28 16:36:51 crc kubenswrapper[4954]: /etc/init.d/functions: line 589: 435 Alarm clock "$@" Nov 28 16:36:51 crc kubenswrapper[4954]: > execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-vmnzt" message=< Nov 28 16:36:51 crc kubenswrapper[4954]: Exiting ovn-controller (1) [FAILED] Nov 28 16:36:51 crc kubenswrapper[4954]: Killing ovn-controller (1) [ OK ] Nov 28 16:36:51 crc kubenswrapper[4954]: Killing ovn-controller (1) with SIGKILL [ OK ] Nov 28 16:36:51 crc kubenswrapper[4954]: 2025-11-28T16:36:44Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 28 16:36:51 crc kubenswrapper[4954]: /etc/init.d/functions: line 589: 435 Alarm clock "$@" Nov 28 16:36:51 crc kubenswrapper[4954]: > Nov 28 16:36:51 crc kubenswrapper[4954]: E1128 16:36:51.718462 4954 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 28 16:36:51 crc kubenswrapper[4954]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-28T16:36:44Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 28 16:36:51 crc kubenswrapper[4954]: /etc/init.d/functions: line 589: 435 Alarm clock "$@" Nov 28 16:36:51 crc kubenswrapper[4954]: > pod="openstack/ovn-controller-vmnzt" podUID="e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" containerName="ovn-controller" containerID="cri-o://f4db01dc531cb4daa493907624fc94d1b1d3c77f1683d34c548a3b5fd3be9326" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.718499 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-vmnzt" podUID="e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" containerName="ovn-controller" containerID="cri-o://f4db01dc531cb4daa493907624fc94d1b1d3c77f1683d34c548a3b5fd3be9326" gracePeriod=22 Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.723319 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bff6e67-d9f4-4952-992d-1fa362d23a5c-kube-api-access-bfwwp" (OuterVolumeSpecName: "kube-api-access-bfwwp") pod "8bff6e67-d9f4-4952-992d-1fa362d23a5c" (UID: "8bff6e67-d9f4-4952-992d-1fa362d23a5c"). InnerVolumeSpecName "kube-api-access-bfwwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.737945 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-89b6465dd-zsqfd"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.753217 4954 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.773164 4954 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.773201 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bfwwp\" (UniqueName: \"kubernetes.io/projected/8bff6e67-d9f4-4952-992d-1fa362d23a5c-kube-api-access-bfwwp\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.773212 4954 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.773222 4954 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.773231 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8bff6e67-d9f4-4952-992d-1fa362d23a5c-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.773239 4954 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.773249 4954 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8bff6e67-d9f4-4952-992d-1fa362d23a5c-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.773257 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bff6e67-d9f4-4952-992d-1fa362d23a5c-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.773265 4954 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.773273 4954 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ca81ef12-eb13-468e-81fc-0fdf6aba8830-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.773281 4954 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.773289 4954 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/8a252e1a-c96a-4f98-b24e-b224fedf344c-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.773298 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vkddb\" (UniqueName: \"kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-kube-api-access-vkddb\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.773307 4954 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8a252e1a-c96a-4f98-b24e-b224fedf344c-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.776044 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_8bff6e67-d9f4-4952-992d-1fa362d23a5c/ovn-northd/0.log" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.776082 4954 generic.go:334] "Generic (PLEG): container finished" podID="8bff6e67-d9f4-4952-992d-1fa362d23a5c" containerID="a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b" exitCode=139 Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.776137 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8bff6e67-d9f4-4952-992d-1fa362d23a5c","Type":"ContainerDied","Data":"a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b"} Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.776164 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8bff6e67-d9f4-4952-992d-1fa362d23a5c","Type":"ContainerDied","Data":"6f923c39766002793dc5627e20b72140d6662eb95b5f71078335c85713439d31"} Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.776169 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.776182 4954 scope.go:117] "RemoveContainer" containerID="022212ff501e5715e891c5ad172a0810fa883b76595a765f3609b5a7f03c28b6" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.779262 4954 generic.go:334] "Generic (PLEG): container finished" podID="caa7b547-96ec-4119-87b1-fa14697ba9d1" containerID="de3a57e387f1582f6570d0c5f5e659d725722d24b50d157c852c9079752bde22" exitCode=0 Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.779336 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5767fbd5b4-jmjzp" event={"ID":"caa7b547-96ec-4119-87b1-fa14697ba9d1","Type":"ContainerDied","Data":"de3a57e387f1582f6570d0c5f5e659d725722d24b50d157c852c9079752bde22"} Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.780027 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8bff6e67-d9f4-4952-992d-1fa362d23a5c" (UID: "8bff6e67-d9f4-4952-992d-1fa362d23a5c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.786732 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.787789 4954 generic.go:334] "Generic (PLEG): container finished" podID="8a252e1a-c96a-4f98-b24e-b224fedf344c" containerID="bd527e7188610d7a63072bd328e5dd74a2b0ad964e3b6ec643b1c9a69f9b0c1d" exitCode=0 Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.787871 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.787870 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8a252e1a-c96a-4f98-b24e-b224fedf344c","Type":"ContainerDied","Data":"bd527e7188610d7a63072bd328e5dd74a2b0ad964e3b6ec643b1c9a69f9b0c1d"} Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.787911 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8a252e1a-c96a-4f98-b24e-b224fedf344c","Type":"ContainerDied","Data":"97b453813744c95117932caad133352dde444a5f9d42c494462fd93989d711ed"} Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.792511 4954 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.794286 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.796181 4954 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.796873 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novaapif92b-account-delete-sj7n5" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.798996 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapif92b-account-delete-sj7n5" event={"ID":"1bc7ec75-0145-47f2-8193-28c8f17b572a","Type":"ContainerDied","Data":"c705e87d6dab562e3079674833001b497d39ab9d92e83220fb67c4568a538703"} Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.799042 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c705e87d6dab562e3079674833001b497d39ab9d92e83220fb67c4568a538703" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.815992 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.816241 4954 scope.go:117] "RemoveContainer" containerID="a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.822549 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.822901 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ca81ef12-eb13-468e-81fc-0fdf6aba8830","Type":"ContainerDied","Data":"ccf80cb55981316753bed96b4d01216050bda14ebc5320cd7e46d2ec96fb5337"} Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.822966 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.830856 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.833562 4954 generic.go:334] "Generic (PLEG): container finished" podID="1d348de7-2e67-41df-8d59-4692491ea145" containerID="8532429bd8b897e8166cfc3585b91c29bc3335e178d6afa7ee092edcea1a6b07" exitCode=0 Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.833618 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1d348de7-2e67-41df-8d59-4692491ea145","Type":"ContainerDied","Data":"8532429bd8b897e8166cfc3585b91c29bc3335e178d6afa7ee092edcea1a6b07"} Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.833641 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1d348de7-2e67-41df-8d59-4692491ea145","Type":"ContainerDied","Data":"35067c7d8aad12b2c7373fb07d737310ed05c7b84ca8d0d3a924cc92d448d14d"} Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.833707 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.836122 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.856714 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.877029 4954 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.877059 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.877068 4954 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.880946 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0528b-account-delete-4cx2z" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.882624 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinderd6b0-account-delete-plvlb" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.883288 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican3a2d-account-delete-cznv8" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.883767 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron9335-account-delete-9jxvf" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.916584 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f8d636b-e07a-46b1-91b3-899a395e3ce5" path="/var/lib/kubelet/pods/0f8d636b-e07a-46b1-91b3-899a395e3ce5/volumes" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.924212 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c362e30-8109-411f-9f89-21c7c28da6c2" path="/var/lib/kubelet/pods/1c362e30-8109-411f-9f89-21c7c28da6c2/volumes" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.925806 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fb4ddd8-d914-431c-a39f-28a0c6b45354" path="/var/lib/kubelet/pods/2fb4ddd8-d914-431c-a39f-28a0c6b45354/volumes" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.934015 4954 scope.go:117] "RemoveContainer" containerID="022212ff501e5715e891c5ad172a0810fa883b76595a765f3609b5a7f03c28b6" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.934527 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45c8821a-baab-4e3c-8ffb-f4fe71722666" path="/var/lib/kubelet/pods/45c8821a-baab-4e3c-8ffb-f4fe71722666/volumes" Nov 28 16:36:51 crc kubenswrapper[4954]: E1128 16:36:51.934780 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"022212ff501e5715e891c5ad172a0810fa883b76595a765f3609b5a7f03c28b6\": container with ID starting with 022212ff501e5715e891c5ad172a0810fa883b76595a765f3609b5a7f03c28b6 not found: ID does not exist" containerID="022212ff501e5715e891c5ad172a0810fa883b76595a765f3609b5a7f03c28b6" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.934866 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"022212ff501e5715e891c5ad172a0810fa883b76595a765f3609b5a7f03c28b6"} err="failed to get container status \"022212ff501e5715e891c5ad172a0810fa883b76595a765f3609b5a7f03c28b6\": rpc error: code = NotFound desc = could not find container \"022212ff501e5715e891c5ad172a0810fa883b76595a765f3609b5a7f03c28b6\": container with ID starting with 022212ff501e5715e891c5ad172a0810fa883b76595a765f3609b5a7f03c28b6 not found: ID does not exist" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.934941 4954 scope.go:117] "RemoveContainer" containerID="a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.935987 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="642e0138-17d8-47e0-a67d-51a758291f7e" path="/var/lib/kubelet/pods/642e0138-17d8-47e0-a67d-51a758291f7e/volumes" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.937619 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70613221-3087-4dc3-9f41-86eb6fe88041" path="/var/lib/kubelet/pods/70613221-3087-4dc3-9f41-86eb6fe88041/volumes" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.941262 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78bd4de4-5601-4771-b15c-c240e097519b" path="/var/lib/kubelet/pods/78bd4de4-5601-4771-b15c-c240e097519b/volumes" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.947633 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-server-conf" (OuterVolumeSpecName: "server-conf") pod "8a252e1a-c96a-4f98-b24e-b224fedf344c" (UID: 
"8a252e1a-c96a-4f98-b24e-b224fedf344c"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.954165 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "8bff6e67-d9f4-4952-992d-1fa362d23a5c" (UID: "8bff6e67-d9f4-4952-992d-1fa362d23a5c"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: E1128 16:36:51.954411 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b\": container with ID starting with a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b not found: ID does not exist" containerID="a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.955651 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data" (OuterVolumeSpecName: "config-data") pod "8a252e1a-c96a-4f98-b24e-b224fedf344c" (UID: "8a252e1a-c96a-4f98-b24e-b224fedf344c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.956426 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" path="/var/lib/kubelet/pods/7cf746a5-7be5-4e2d-a7f8-18878e2a41a5/volumes" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.958075 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d8b87c9-4dd0-431c-a555-49141762763a" path="/var/lib/kubelet/pods/7d8b87c9-4dd0-431c-a555-49141762763a/volumes" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.960133 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9023ad7d-6621-4ed8-aec4-bd1d0db53088" path="/var/lib/kubelet/pods/9023ad7d-6621-4ed8-aec4-bd1d0db53088/volumes" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.961367 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b"} err="failed to get container status \"a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b\": rpc error: code = NotFound desc = could not find container \"a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b\": container with ID starting with a2d1edf9c99a1c32bfd44c3393b8a4f24c3b1dd91c96f3effc9ee4d652abf17b not found: ID does not exist" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.961461 4954 scope.go:117] "RemoveContainer" containerID="bd527e7188610d7a63072bd328e5dd74a2b0ad964e3b6ec643b1c9a69f9b0c1d" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.962645 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" path="/var/lib/kubelet/pods/91dcac80-4f79-4fbb-81c5-4fd24cb69ccf/volumes" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.964554 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c381eb3-f466-40b7-a962-6782db85678c" path="/var/lib/kubelet/pods/9c381eb3-f466-40b7-a962-6782db85678c/volumes" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 
16:36:51.965133 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4439c3d-c90f-4b13-87a4-01c211cec875" path="/var/lib/kubelet/pods/c4439c3d-c90f-4b13-87a4-01c211cec875/volumes" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.968708 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf74e9b3-c272-47a3-bd81-1fae19e39236" path="/var/lib/kubelet/pods/cf74e9b3-c272-47a3-bd81-1fae19e39236/volumes" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.978849 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.978887 4954 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8a252e1a-c96a-4f98-b24e-b224fedf344c-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:51 crc kubenswrapper[4954]: I1128 16:36:51.978906 4954 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.002802 4954 scope.go:117] "RemoveContainer" containerID="350d2239e212031027828edd8c6bccb2046da3c9a507517a93cf98dffefd49cd" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.009668 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.009742 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.013261 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "8a252e1a-c96a-4f98-b24e-b224fedf344c" (UID: "8a252e1a-c96a-4f98-b24e-b224fedf344c"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.027634 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "8bff6e67-d9f4-4952-992d-1fa362d23a5c" (UID: "8bff6e67-d9f4-4952-992d-1fa362d23a5c"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.027872 4954 scope.go:117] "RemoveContainer" containerID="bd527e7188610d7a63072bd328e5dd74a2b0ad964e3b6ec643b1c9a69f9b0c1d" Nov 28 16:36:52 crc kubenswrapper[4954]: E1128 16:36:52.028387 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd527e7188610d7a63072bd328e5dd74a2b0ad964e3b6ec643b1c9a69f9b0c1d\": container with ID starting with bd527e7188610d7a63072bd328e5dd74a2b0ad964e3b6ec643b1c9a69f9b0c1d not found: ID does not exist" containerID="bd527e7188610d7a63072bd328e5dd74a2b0ad964e3b6ec643b1c9a69f9b0c1d" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.028443 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd527e7188610d7a63072bd328e5dd74a2b0ad964e3b6ec643b1c9a69f9b0c1d"} err="failed to get container status \"bd527e7188610d7a63072bd328e5dd74a2b0ad964e3b6ec643b1c9a69f9b0c1d\": rpc error: code = NotFound desc = could not find container \"bd527e7188610d7a63072bd328e5dd74a2b0ad964e3b6ec643b1c9a69f9b0c1d\": container with ID starting with bd527e7188610d7a63072bd328e5dd74a2b0ad964e3b6ec643b1c9a69f9b0c1d not found: ID does not exist" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.028477 4954 scope.go:117] "RemoveContainer" containerID="350d2239e212031027828edd8c6bccb2046da3c9a507517a93cf98dffefd49cd" Nov 28 16:36:52 crc kubenswrapper[4954]: E1128 16:36:52.028839 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"350d2239e212031027828edd8c6bccb2046da3c9a507517a93cf98dffefd49cd\": container with ID starting with 350d2239e212031027828edd8c6bccb2046da3c9a507517a93cf98dffefd49cd not found: ID does not exist" containerID="350d2239e212031027828edd8c6bccb2046da3c9a507517a93cf98dffefd49cd" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.028879 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"350d2239e212031027828edd8c6bccb2046da3c9a507517a93cf98dffefd49cd"} err="failed to get container status \"350d2239e212031027828edd8c6bccb2046da3c9a507517a93cf98dffefd49cd\": rpc error: code = NotFound desc = could not find container \"350d2239e212031027828edd8c6bccb2046da3c9a507517a93cf98dffefd49cd\": container with ID starting with 350d2239e212031027828edd8c6bccb2046da3c9a507517a93cf98dffefd49cd not found: ID does not exist" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.028903 4954 scope.go:117] "RemoveContainer" containerID="dd5fdf435b0dbd1c11c86c7785b0da91c9296f8511aad91bbac2bb898f435f32" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.040951 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.045806 4954 scope.go:117] "RemoveContainer" containerID="edef8b1b7915e1f7d6f114baa465a89b8aaf40344d1baefb410030093e2aaa7b" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.051553 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-5fb676564c-cfds2"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.073766 4954 scope.go:117] "RemoveContainer" containerID="8532429bd8b897e8166cfc3585b91c29bc3335e178d6afa7ee092edcea1a6b07" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.085458 4954 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/8a252e1a-c96a-4f98-b24e-b224fedf344c-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.085492 4954 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8bff6e67-d9f4-4952-992d-1fa362d23a5c-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.113078 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.115063 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-5fb676564c-cfds2"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.151135 4954 scope.go:117] "RemoveContainer" containerID="8e8430765fa780e78273b20891494975f648a250228f50c0bf9c943516e4e664" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.186284 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-combined-ca-bundle\") pod \"caa7b547-96ec-4119-87b1-fa14697ba9d1\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.186349 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-credential-keys\") pod \"caa7b547-96ec-4119-87b1-fa14697ba9d1\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.186369 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-scripts\") pod \"caa7b547-96ec-4119-87b1-fa14697ba9d1\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.186509 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbjpb\" (UniqueName: \"kubernetes.io/projected/caa7b547-96ec-4119-87b1-fa14697ba9d1-kube-api-access-kbjpb\") pod \"caa7b547-96ec-4119-87b1-fa14697ba9d1\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.186586 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-fernet-keys\") pod \"caa7b547-96ec-4119-87b1-fa14697ba9d1\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.186648 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-public-tls-certs\") pod \"caa7b547-96ec-4119-87b1-fa14697ba9d1\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.186758 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-internal-tls-certs\") pod \"caa7b547-96ec-4119-87b1-fa14697ba9d1\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.186832 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-config-data\") pod \"caa7b547-96ec-4119-87b1-fa14697ba9d1\" (UID: \"caa7b547-96ec-4119-87b1-fa14697ba9d1\") " Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.192509 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "caa7b547-96ec-4119-87b1-fa14697ba9d1" (UID: "caa7b547-96ec-4119-87b1-fa14697ba9d1"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.203903 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-fs8x2"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.216968 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glanceb261-account-delete-s574s"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.235805 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "caa7b547-96ec-4119-87b1-fa14697ba9d1" (UID: "caa7b547-96ec-4119-87b1-fa14697ba9d1"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.236114 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/caa7b547-96ec-4119-87b1-fa14697ba9d1-kube-api-access-kbjpb" (OuterVolumeSpecName: "kube-api-access-kbjpb") pod "caa7b547-96ec-4119-87b1-fa14697ba9d1" (UID: "caa7b547-96ec-4119-87b1-fa14697ba9d1"). InnerVolumeSpecName "kube-api-access-kbjpb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.240485 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-scripts" (OuterVolumeSpecName: "scripts") pod "caa7b547-96ec-4119-87b1-fa14697ba9d1" (UID: "caa7b547-96ec-4119-87b1-fa14697ba9d1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.245745 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "caa7b547-96ec-4119-87b1-fa14697ba9d1" (UID: "caa7b547-96ec-4119-87b1-fa14697ba9d1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.245815 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-b261-account-create-update-z4z7m"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.252076 4954 scope.go:117] "RemoveContainer" containerID="8532429bd8b897e8166cfc3585b91c29bc3335e178d6afa7ee092edcea1a6b07" Nov 28 16:36:52 crc kubenswrapper[4954]: E1128 16:36:52.252656 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8532429bd8b897e8166cfc3585b91c29bc3335e178d6afa7ee092edcea1a6b07\": container with ID starting with 8532429bd8b897e8166cfc3585b91c29bc3335e178d6afa7ee092edcea1a6b07 not found: ID does not exist" containerID="8532429bd8b897e8166cfc3585b91c29bc3335e178d6afa7ee092edcea1a6b07" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.252706 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8532429bd8b897e8166cfc3585b91c29bc3335e178d6afa7ee092edcea1a6b07"} err="failed to get container status \"8532429bd8b897e8166cfc3585b91c29bc3335e178d6afa7ee092edcea1a6b07\": rpc error: code = NotFound desc = could not find container \"8532429bd8b897e8166cfc3585b91c29bc3335e178d6afa7ee092edcea1a6b07\": container with ID starting with 8532429bd8b897e8166cfc3585b91c29bc3335e178d6afa7ee092edcea1a6b07 not found: ID does not exist" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.252731 4954 scope.go:117] "RemoveContainer" containerID="8e8430765fa780e78273b20891494975f648a250228f50c0bf9c943516e4e664" Nov 28 16:36:52 crc kubenswrapper[4954]: E1128 16:36:52.253193 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e8430765fa780e78273b20891494975f648a250228f50c0bf9c943516e4e664\": container with ID starting with 8e8430765fa780e78273b20891494975f648a250228f50c0bf9c943516e4e664 not found: ID does not exist" containerID="8e8430765fa780e78273b20891494975f648a250228f50c0bf9c943516e4e664" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.253236 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e8430765fa780e78273b20891494975f648a250228f50c0bf9c943516e4e664"} err="failed to get container status \"8e8430765fa780e78273b20891494975f648a250228f50c0bf9c943516e4e664\": rpc error: code = NotFound desc = could not find container \"8e8430765fa780e78273b20891494975f648a250228f50c0bf9c943516e4e664\": container with ID starting with 8e8430765fa780e78273b20891494975f648a250228f50c0bf9c943516e4e664 not found: ID does not exist" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.264394 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "caa7b547-96ec-4119-87b1-fa14697ba9d1" (UID: "caa7b547-96ec-4119-87b1-fa14697ba9d1"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.266743 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-fs8x2"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.286509 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-b261-account-create-update-z4z7m"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.289244 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.289274 4954 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.289293 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.289307 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbjpb\" (UniqueName: \"kubernetes.io/projected/caa7b547-96ec-4119-87b1-fa14697ba9d1-kube-api-access-kbjpb\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.289319 4954 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.289329 4954 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.301009 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "caa7b547-96ec-4119-87b1-fa14697ba9d1" (UID: "caa7b547-96ec-4119-87b1-fa14697ba9d1"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.326601 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glanceb261-account-delete-s574s"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.372365 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.381604 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.389656 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-djcvh"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.390516 4954 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.393247 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-config-data" (OuterVolumeSpecName: "config-data") pod "caa7b547-96ec-4119-87b1-fa14697ba9d1" (UID: "caa7b547-96ec-4119-87b1-fa14697ba9d1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.397377 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-djcvh"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.405248 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-4c07-account-create-update-ph8bg"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.424616 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement4c07-account-delete-52zll"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.436676 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-4c07-account-create-update-ph8bg"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.441834 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-vmnzt_e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1/ovn-controller/0.log" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.441902 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-vmnzt" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.443984 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement4c07-account-delete-52zll"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.451846 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.459628 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.468592 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-dkjtq"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.476770 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-dkjtq"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.495033 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-d6b0-account-create-update-z5zkz"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.495420 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-run-ovn\") pod \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.495506 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-combined-ca-bundle\") pod \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.495576 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-run\") pod \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.495605 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-ovn-controller-tls-certs\") pod \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.495658 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctpg7\" (UniqueName: \"kubernetes.io/projected/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-kube-api-access-ctpg7\") pod \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.495685 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-scripts\") pod \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.495716 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-log-ovn\") pod \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\" (UID: \"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1\") " Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 
16:36:52.496056 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caa7b547-96ec-4119-87b1-fa14697ba9d1-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.496112 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" (UID: "e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.496148 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" (UID: "e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.499085 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-run" (OuterVolumeSpecName: "var-run") pod "e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" (UID: "e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.500091 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-scripts" (OuterVolumeSpecName: "scripts") pod "e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" (UID: "e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.501240 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-kube-api-access-ctpg7" (OuterVolumeSpecName: "kube-api-access-ctpg7") pod "e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" (UID: "e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1"). InnerVolumeSpecName "kube-api-access-ctpg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.501264 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-d6b0-account-create-update-z5zkz"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.511616 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinderd6b0-account-delete-plvlb"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.524156 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinderd6b0-account-delete-plvlb"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.530313 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" (UID: "e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.534237 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.542171 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.550425 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.557883 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.566562 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-s75gb"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.573798 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-s75gb"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.578092 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" (UID: "e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1"). InnerVolumeSpecName "ovn-controller-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.582710 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron9335-account-delete-9jxvf"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.592554 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-9335-account-create-update-r8gqv"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.597965 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.598011 4954 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.598022 4954 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.598033 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctpg7\" (UniqueName: \"kubernetes.io/projected/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-kube-api-access-ctpg7\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.598070 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.598081 4954 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.598091 4954 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.607730 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-9335-account-create-update-r8gqv"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.618742 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron9335-account-delete-9jxvf"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.626970 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-jt755"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.634330 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-jt755"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.650586 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican3a2d-account-delete-cznv8"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.658673 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-3a2d-account-create-update-tdzcg"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.682677 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-3a2d-account-create-update-tdzcg"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.692892 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican3a2d-account-delete-cznv8"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.839434 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-qtp2z"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.851220 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-qtp2z"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.862117 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapif92b-account-delete-sj7n5"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.866291 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-f92b-account-create-update-j96hc"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.878764 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novaapif92b-account-delete-sj7n5"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.891754 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-f92b-account-create-update-j96hc"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.893756 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-vmnzt_e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1/ovn-controller/0.log" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.893849 4954 generic.go:334] "Generic (PLEG): container finished" podID="e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" containerID="f4db01dc531cb4daa493907624fc94d1b1d3c77f1683d34c548a3b5fd3be9326" exitCode=137 Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.893970 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vmnzt" event={"ID":"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1","Type":"ContainerDied","Data":"f4db01dc531cb4daa493907624fc94d1b1d3c77f1683d34c548a3b5fd3be9326"} Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.894043 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-vmnzt" 
event={"ID":"e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1","Type":"ContainerDied","Data":"06a79d8e20c952b6c20c1e9bde76dd0728e40021af86a13f5f7b4e4a3667a679"} Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.894122 4954 scope.go:117] "RemoveContainer" containerID="f4db01dc531cb4daa493907624fc94d1b1d3c77f1683d34c548a3b5fd3be9326" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.894745 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-vmnzt" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.902397 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5767fbd5b4-jmjzp" event={"ID":"caa7b547-96ec-4119-87b1-fa14697ba9d1","Type":"ContainerDied","Data":"fe383ba7337414ab89ed6f97c783d772b2280604308842589f610caf6302960b"} Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.902437 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5767fbd5b4-jmjzp" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.949475 4954 scope.go:117] "RemoveContainer" containerID="f4db01dc531cb4daa493907624fc94d1b1d3c77f1683d34c548a3b5fd3be9326" Nov 28 16:36:52 crc kubenswrapper[4954]: E1128 16:36:52.950202 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4db01dc531cb4daa493907624fc94d1b1d3c77f1683d34c548a3b5fd3be9326\": container with ID starting with f4db01dc531cb4daa493907624fc94d1b1d3c77f1683d34c548a3b5fd3be9326 not found: ID does not exist" containerID="f4db01dc531cb4daa493907624fc94d1b1d3c77f1683d34c548a3b5fd3be9326" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.950471 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4db01dc531cb4daa493907624fc94d1b1d3c77f1683d34c548a3b5fd3be9326"} err="failed to get container status \"f4db01dc531cb4daa493907624fc94d1b1d3c77f1683d34c548a3b5fd3be9326\": rpc error: code = NotFound desc = could not find container \"f4db01dc531cb4daa493907624fc94d1b1d3c77f1683d34c548a3b5fd3be9326\": container with ID starting with f4db01dc531cb4daa493907624fc94d1b1d3c77f1683d34c548a3b5fd3be9326 not found: ID does not exist" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.950624 4954 scope.go:117] "RemoveContainer" containerID="de3a57e387f1582f6570d0c5f5e659d725722d24b50d157c852c9079752bde22" Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.972594 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-vmnzt"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.976632 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-lpwkg"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.984078 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-vmnzt"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.990441 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-lpwkg"] Nov 28 16:36:52 crc kubenswrapper[4954]: I1128 16:36:52.996271 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0528b-account-delete-4cx2z"] Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.003344 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-528b-account-create-update-g72ls"] Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.010026 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/nova-cell0-528b-account-create-update-g72ls"] Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.018029 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell0528b-account-delete-4cx2z"] Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.023826 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-5767fbd5b4-jmjzp"] Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.030035 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-5767fbd5b4-jmjzp"] Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.872797 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="030c776d-b20a-4852-a19b-ccf67b8589b5" path="/var/lib/kubelet/pods/030c776d-b20a-4852-a19b-ccf67b8589b5/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.874180 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b7b0248-e1e6-40f5-9edb-0dbece5f12ae" path="/var/lib/kubelet/pods/1b7b0248-e1e6-40f5-9edb-0dbece5f12ae/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.874790 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bc7ec75-0145-47f2-8193-28c8f17b572a" path="/var/lib/kubelet/pods/1bc7ec75-0145-47f2-8193-28c8f17b572a/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.875935 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d348de7-2e67-41df-8d59-4692491ea145" path="/var/lib/kubelet/pods/1d348de7-2e67-41df-8d59-4692491ea145/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.877831 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f8f01be-65c4-4a56-85e5-1b46ba5804ee" path="/var/lib/kubelet/pods/1f8f01be-65c4-4a56-85e5-1b46ba5804ee/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.878419 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2636eac0-4a90-4144-9172-16b5bcc80ca4" path="/var/lib/kubelet/pods/2636eac0-4a90-4144-9172-16b5bcc80ca4/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.879597 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3022fb04-fc0f-44b1-9f97-3893aa4bdd68" path="/var/lib/kubelet/pods/3022fb04-fc0f-44b1-9f97-3893aa4bdd68/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.880363 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31e642ba-e4e1-42e7-aa70-6fa045bbc379" path="/var/lib/kubelet/pods/31e642ba-e4e1-42e7-aa70-6fa045bbc379/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.881350 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="537c74c3-f8aa-4e8f-961b-87bf6a088574" path="/var/lib/kubelet/pods/537c74c3-f8aa-4e8f-961b-87bf6a088574/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.882053 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6abe9424-aabe-4ae6-8032-79b92583d31d" path="/var/lib/kubelet/pods/6abe9424-aabe-4ae6-8032-79b92583d31d/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.883463 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="762dc5ac-a8d3-4b91-806c-079e568530b1" path="/var/lib/kubelet/pods/762dc5ac-a8d3-4b91-806c-079e568530b1/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.884417 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b278f77-7514-4f2d-8db4-f9f209dc33dd" path="/var/lib/kubelet/pods/7b278f77-7514-4f2d-8db4-f9f209dc33dd/volumes" 
Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.885099 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a252e1a-c96a-4f98-b24e-b224fedf344c" path="/var/lib/kubelet/pods/8a252e1a-c96a-4f98-b24e-b224fedf344c/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.886181 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a6b461d-5be3-4805-9ba7-36cffead16e9" path="/var/lib/kubelet/pods/8a6b461d-5be3-4805-9ba7-36cffead16e9/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.886724 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b16e07c-9b6f-429b-b239-3efa92727910" path="/var/lib/kubelet/pods/8b16e07c-9b6f-429b-b239-3efa92727910/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.887378 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bff6e67-d9f4-4952-992d-1fa362d23a5c" path="/var/lib/kubelet/pods/8bff6e67-d9f4-4952-992d-1fa362d23a5c/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.888458 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="988f8c45-0585-47e8-b9aa-c02c53c62c75" path="/var/lib/kubelet/pods/988f8c45-0585-47e8-b9aa-c02c53c62c75/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.889072 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="acf16584-ed76-41dc-955b-17e86a277627" path="/var/lib/kubelet/pods/acf16584-ed76-41dc-955b-17e86a277627/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.889589 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af0c7199-9850-4553-b644-371c7e305443" path="/var/lib/kubelet/pods/af0c7199-9850-4553-b644-371c7e305443/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.890511 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afe4302a-413f-48e1-90a9-3f1178e5c6f7" path="/var/lib/kubelet/pods/afe4302a-413f-48e1-90a9-3f1178e5c6f7/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.891039 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb410d6d-f34f-4433-bab6-5a42c73408ab" path="/var/lib/kubelet/pods/bb410d6d-f34f-4433-bab6-5a42c73408ab/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.891713 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd" path="/var/lib/kubelet/pods/bd2f088a-e1b5-4f44-8296-f6e8d36ce6cd/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.892835 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca81ef12-eb13-468e-81fc-0fdf6aba8830" path="/var/lib/kubelet/pods/ca81ef12-eb13-468e-81fc-0fdf6aba8830/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.893380 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="caa7b547-96ec-4119-87b1-fa14697ba9d1" path="/var/lib/kubelet/pods/caa7b547-96ec-4119-87b1-fa14697ba9d1/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.893961 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d62a7c52-13eb-499e-acb2-8899ac6984f8" path="/var/lib/kubelet/pods/d62a7c52-13eb-499e-acb2-8899ac6984f8/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.895045 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d887f697-ad36-44e6-8011-878d0a78b3bf" path="/var/lib/kubelet/pods/d887f697-ad36-44e6-8011-878d0a78b3bf/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 
16:36:53.895698 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dac05eb0-cb63-47c0-8542-0e33bc1d8963" path="/var/lib/kubelet/pods/dac05eb0-cb63-47c0-8542-0e33bc1d8963/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.896471 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" path="/var/lib/kubelet/pods/e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.897691 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e880dbc3-1219-4582-b093-96e007d91831" path="/var/lib/kubelet/pods/e880dbc3-1219-4582-b093-96e007d91831/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.898170 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee22e0b0-27ab-4365-9ccc-6b563eb44b3a" path="/var/lib/kubelet/pods/ee22e0b0-27ab-4365-9ccc-6b563eb44b3a/volumes" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.923497 4954 generic.go:334] "Generic (PLEG): container finished" podID="7cef279d-a444-456d-8add-1998974a6e08" containerID="190064bf648d40792650ba482e7ac25d6d7c2fabc259baf1d0b1b45c5abca205" exitCode=0 Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.923565 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7cef279d-a444-456d-8add-1998974a6e08","Type":"ContainerDied","Data":"190064bf648d40792650ba482e7ac25d6d7c2fabc259baf1d0b1b45c5abca205"} Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.923600 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7cef279d-a444-456d-8add-1998974a6e08","Type":"ContainerDied","Data":"6f42dbbb98e2b3df9dc95cc8c60bf46034efa15d17bb2c64b7a730ed3ce33760"} Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.923614 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f42dbbb98e2b3df9dc95cc8c60bf46034efa15d17bb2c64b7a730ed3ce33760" Nov 28 16:36:53 crc kubenswrapper[4954]: I1128 16:36:53.924176 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.018555 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-scripts\") pod \"7cef279d-a444-456d-8add-1998974a6e08\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.018637 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-combined-ca-bundle\") pod \"7cef279d-a444-456d-8add-1998974a6e08\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.018662 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-config-data\") pod \"7cef279d-a444-456d-8add-1998974a6e08\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.018684 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-sg-core-conf-yaml\") pod \"7cef279d-a444-456d-8add-1998974a6e08\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.018725 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s67r7\" (UniqueName: \"kubernetes.io/projected/7cef279d-a444-456d-8add-1998974a6e08-kube-api-access-s67r7\") pod \"7cef279d-a444-456d-8add-1998974a6e08\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.019422 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-ceilometer-tls-certs\") pod \"7cef279d-a444-456d-8add-1998974a6e08\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.019530 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7cef279d-a444-456d-8add-1998974a6e08-log-httpd\") pod \"7cef279d-a444-456d-8add-1998974a6e08\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.019672 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7cef279d-a444-456d-8add-1998974a6e08-run-httpd\") pod \"7cef279d-a444-456d-8add-1998974a6e08\" (UID: \"7cef279d-a444-456d-8add-1998974a6e08\") " Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.019962 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7cef279d-a444-456d-8add-1998974a6e08-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7cef279d-a444-456d-8add-1998974a6e08" (UID: "7cef279d-a444-456d-8add-1998974a6e08"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.019986 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7cef279d-a444-456d-8add-1998974a6e08-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7cef279d-a444-456d-8add-1998974a6e08" (UID: "7cef279d-a444-456d-8add-1998974a6e08"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.023656 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-scripts" (OuterVolumeSpecName: "scripts") pod "7cef279d-a444-456d-8add-1998974a6e08" (UID: "7cef279d-a444-456d-8add-1998974a6e08"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.040763 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cef279d-a444-456d-8add-1998974a6e08-kube-api-access-s67r7" (OuterVolumeSpecName: "kube-api-access-s67r7") pod "7cef279d-a444-456d-8add-1998974a6e08" (UID: "7cef279d-a444-456d-8add-1998974a6e08"). InnerVolumeSpecName "kube-api-access-s67r7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.047397 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7cef279d-a444-456d-8add-1998974a6e08" (UID: "7cef279d-a444-456d-8add-1998974a6e08"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.060688 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "7cef279d-a444-456d-8add-1998974a6e08" (UID: "7cef279d-a444-456d-8add-1998974a6e08"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.076407 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7cef279d-a444-456d-8add-1998974a6e08" (UID: "7cef279d-a444-456d-8add-1998974a6e08"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.096569 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-config-data" (OuterVolumeSpecName: "config-data") pod "7cef279d-a444-456d-8add-1998974a6e08" (UID: "7cef279d-a444-456d-8add-1998974a6e08"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.120897 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.120945 4954 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.120958 4954 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.120971 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s67r7\" (UniqueName: \"kubernetes.io/projected/7cef279d-a444-456d-8add-1998974a6e08-kube-api-access-s67r7\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.120985 4954 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.120997 4954 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7cef279d-a444-456d-8add-1998974a6e08-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.121005 4954 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7cef279d-a444-456d-8add-1998974a6e08-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.121013 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7cef279d-a444-456d-8add-1998974a6e08-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.525925 4954 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="7d8b87c9-4dd0-431c-a555-49141762763a" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.164:8776/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 16:36:54 crc kubenswrapper[4954]: I1128 16:36:54.945703 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 16:36:55 crc kubenswrapper[4954]: I1128 16:36:55.000457 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:36:55 crc kubenswrapper[4954]: I1128 16:36:55.005923 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 16:36:55 crc kubenswrapper[4954]: E1128 16:36:55.108963 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:36:55 crc kubenswrapper[4954]: E1128 16:36:55.109502 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:36:55 crc kubenswrapper[4954]: E1128 16:36:55.109776 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:36:55 crc kubenswrapper[4954]: E1128 16:36:55.109967 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:36:55 crc kubenswrapper[4954]: E1128 16:36:55.110078 4954 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-d4vfs" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovsdb-server" Nov 28 16:36:55 crc kubenswrapper[4954]: E1128 16:36:55.111350 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:36:55 crc kubenswrapper[4954]: E1128 16:36:55.115917 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:36:55 crc kubenswrapper[4954]: E1128 16:36:55.115988 4954 prober.go:104] "Probe errored" err="rpc error: code = 
Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-d4vfs" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovs-vswitchd" Nov 28 16:36:55 crc kubenswrapper[4954]: I1128 16:36:55.864262 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7cef279d-a444-456d-8add-1998974a6e08" path="/var/lib/kubelet/pods/7cef279d-a444-456d-8add-1998974a6e08/volumes" Nov 28 16:37:00 crc kubenswrapper[4954]: E1128 16:37:00.107466 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:37:00 crc kubenswrapper[4954]: E1128 16:37:00.108622 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:37:00 crc kubenswrapper[4954]: E1128 16:37:00.108984 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:37:00 crc kubenswrapper[4954]: E1128 16:37:00.109028 4954 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-d4vfs" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovsdb-server" Nov 28 16:37:00 crc kubenswrapper[4954]: E1128 16:37:00.110102 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:37:00 crc kubenswrapper[4954]: E1128 16:37:00.111444 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:37:00 crc kubenswrapper[4954]: E1128 16:37:00.112959 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" 
cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:37:00 crc kubenswrapper[4954]: E1128 16:37:00.113006 4954 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-d4vfs" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovs-vswitchd" Nov 28 16:37:02 crc kubenswrapper[4954]: I1128 16:37:02.481144 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:37:02 crc kubenswrapper[4954]: I1128 16:37:02.481209 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:37:05 crc kubenswrapper[4954]: E1128 16:37:05.106683 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:37:05 crc kubenswrapper[4954]: E1128 16:37:05.107430 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:37:05 crc kubenswrapper[4954]: E1128 16:37:05.108127 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:37:05 crc kubenswrapper[4954]: E1128 16:37:05.108554 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:37:05 crc kubenswrapper[4954]: E1128 16:37:05.108594 4954 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-d4vfs" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovsdb-server" Nov 28 16:37:05 crc kubenswrapper[4954]: E1128 16:37:05.109833 4954 log.go:32] "ExecSync cmd 
from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:37:05 crc kubenswrapper[4954]: E1128 16:37:05.113227 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:37:05 crc kubenswrapper[4954]: E1128 16:37:05.113274 4954 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-d4vfs" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovs-vswitchd" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.058802 4954 generic.go:334] "Generic (PLEG): container finished" podID="6ce804c9-edba-4404-9099-4c0f102aa1b2" containerID="50b931559f7a77fb0607febad2cc3106710093b63140da41ba7de9a70984ad4b" exitCode=0 Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.059211 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bdd54468f-gzf48" event={"ID":"6ce804c9-edba-4404-9099-4c0f102aa1b2","Type":"ContainerDied","Data":"50b931559f7a77fb0607febad2cc3106710093b63140da41ba7de9a70984ad4b"} Nov 28 16:37:06 crc kubenswrapper[4954]: E1128 16:37:06.159620 4954 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6ce804c9_edba_4404_9099_4c0f102aa1b2.slice/crio-conmon-50b931559f7a77fb0607febad2cc3106710093b63140da41ba7de9a70984ad4b.scope\": RecentStats: unable to find data in memory cache]" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.382956 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.416653 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-public-tls-certs\") pod \"6ce804c9-edba-4404-9099-4c0f102aa1b2\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.416927 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-httpd-config\") pod \"6ce804c9-edba-4404-9099-4c0f102aa1b2\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.417040 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-config\") pod \"6ce804c9-edba-4404-9099-4c0f102aa1b2\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.417675 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-combined-ca-bundle\") pod \"6ce804c9-edba-4404-9099-4c0f102aa1b2\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.417809 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-internal-tls-certs\") pod \"6ce804c9-edba-4404-9099-4c0f102aa1b2\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.417934 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-ovndb-tls-certs\") pod \"6ce804c9-edba-4404-9099-4c0f102aa1b2\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.418101 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p27vt\" (UniqueName: \"kubernetes.io/projected/6ce804c9-edba-4404-9099-4c0f102aa1b2-kube-api-access-p27vt\") pod \"6ce804c9-edba-4404-9099-4c0f102aa1b2\" (UID: \"6ce804c9-edba-4404-9099-4c0f102aa1b2\") " Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.422111 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "6ce804c9-edba-4404-9099-4c0f102aa1b2" (UID: "6ce804c9-edba-4404-9099-4c0f102aa1b2"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.422875 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ce804c9-edba-4404-9099-4c0f102aa1b2-kube-api-access-p27vt" (OuterVolumeSpecName: "kube-api-access-p27vt") pod "6ce804c9-edba-4404-9099-4c0f102aa1b2" (UID: "6ce804c9-edba-4404-9099-4c0f102aa1b2"). InnerVolumeSpecName "kube-api-access-p27vt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.456802 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6ce804c9-edba-4404-9099-4c0f102aa1b2" (UID: "6ce804c9-edba-4404-9099-4c0f102aa1b2"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.458121 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-config" (OuterVolumeSpecName: "config") pod "6ce804c9-edba-4404-9099-4c0f102aa1b2" (UID: "6ce804c9-edba-4404-9099-4c0f102aa1b2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.459231 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "6ce804c9-edba-4404-9099-4c0f102aa1b2" (UID: "6ce804c9-edba-4404-9099-4c0f102aa1b2"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.460150 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6ce804c9-edba-4404-9099-4c0f102aa1b2" (UID: "6ce804c9-edba-4404-9099-4c0f102aa1b2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.473400 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "6ce804c9-edba-4404-9099-4c0f102aa1b2" (UID: "6ce804c9-edba-4404-9099-4c0f102aa1b2"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.519997 4954 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.520033 4954 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.520046 4954 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.520058 4954 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.520070 4954 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.520106 4954 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ce804c9-edba-4404-9099-4c0f102aa1b2-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:06 crc kubenswrapper[4954]: I1128 16:37:06.520118 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p27vt\" (UniqueName: \"kubernetes.io/projected/6ce804c9-edba-4404-9099-4c0f102aa1b2-kube-api-access-p27vt\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:07 crc kubenswrapper[4954]: I1128 16:37:07.072508 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-bdd54468f-gzf48" event={"ID":"6ce804c9-edba-4404-9099-4c0f102aa1b2","Type":"ContainerDied","Data":"6247dea11c8baa76d5504944acc0e1d5678e3686708ccb7d89f295eda08a5665"} Nov 28 16:37:07 crc kubenswrapper[4954]: I1128 16:37:07.072628 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-bdd54468f-gzf48" Nov 28 16:37:07 crc kubenswrapper[4954]: I1128 16:37:07.072682 4954 scope.go:117] "RemoveContainer" containerID="42c4ec9ba6dc6f636e97ef6169d3c32ab858524488e20c3e4beb2047ba334405" Nov 28 16:37:07 crc kubenswrapper[4954]: I1128 16:37:07.102737 4954 scope.go:117] "RemoveContainer" containerID="50b931559f7a77fb0607febad2cc3106710093b63140da41ba7de9a70984ad4b" Nov 28 16:37:07 crc kubenswrapper[4954]: I1128 16:37:07.103130 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-bdd54468f-gzf48"] Nov 28 16:37:07 crc kubenswrapper[4954]: I1128 16:37:07.109194 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-bdd54468f-gzf48"] Nov 28 16:37:07 crc kubenswrapper[4954]: I1128 16:37:07.870009 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ce804c9-edba-4404-9099-4c0f102aa1b2" path="/var/lib/kubelet/pods/6ce804c9-edba-4404-9099-4c0f102aa1b2/volumes" Nov 28 16:37:10 crc kubenswrapper[4954]: E1128 16:37:10.106768 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:37:10 crc kubenswrapper[4954]: E1128 16:37:10.107590 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:37:10 crc kubenswrapper[4954]: E1128 16:37:10.107836 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:37:10 crc kubenswrapper[4954]: E1128 16:37:10.107948 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 16:37:10 crc kubenswrapper[4954]: E1128 16:37:10.108023 4954 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-d4vfs" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovsdb-server" Nov 28 16:37:10 crc kubenswrapper[4954]: E1128 16:37:10.109120 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:37:10 crc kubenswrapper[4954]: E1128 16:37:10.110320 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 16:37:10 crc kubenswrapper[4954]: E1128 16:37:10.110355 4954 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-d4vfs" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovs-vswitchd" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.155221 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-d4vfs_2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8/ovs-vswitchd/0.log" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.157398 4954 generic.go:334] "Generic (PLEG): container finished" podID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" exitCode=137 Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.157457 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-d4vfs" event={"ID":"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8","Type":"ContainerDied","Data":"7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc"} Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.167302 4954 generic.go:334] "Generic (PLEG): container finished" podID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerID="f3f394c2aab82db3f2e4a7ff59593262ff98b913c3e8a66fa7b1fc990d8e2ded" exitCode=137 Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.167349 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"f3f394c2aab82db3f2e4a7ff59593262ff98b913c3e8a66fa7b1fc990d8e2ded"} Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.167412 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6e076e8f-11b0-48ea-a26c-302df7a0ed2f","Type":"ContainerDied","Data":"816602151bb9f389d6417db9f5912a2b060b0665b424223e190bcfe2fb630c24"} Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.167432 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="816602151bb9f389d6417db9f5912a2b060b0665b424223e190bcfe2fb630c24" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.169367 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.235138 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift\") pod \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.235196 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.235252 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-lock\") pod \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.235292 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-cache\") pod \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.235330 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-947jp\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-kube-api-access-947jp\") pod \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\" (UID: \"6e076e8f-11b0-48ea-a26c-302df7a0ed2f\") " Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.236025 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-lock" (OuterVolumeSpecName: "lock") pod "6e076e8f-11b0-48ea-a26c-302df7a0ed2f" (UID: "6e076e8f-11b0-48ea-a26c-302df7a0ed2f"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.236462 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-cache" (OuterVolumeSpecName: "cache") pod "6e076e8f-11b0-48ea-a26c-302df7a0ed2f" (UID: "6e076e8f-11b0-48ea-a26c-302df7a0ed2f"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.240726 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "swift") pod "6e076e8f-11b0-48ea-a26c-302df7a0ed2f" (UID: "6e076e8f-11b0-48ea-a26c-302df7a0ed2f"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.241206 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-kube-api-access-947jp" (OuterVolumeSpecName: "kube-api-access-947jp") pod "6e076e8f-11b0-48ea-a26c-302df7a0ed2f" (UID: "6e076e8f-11b0-48ea-a26c-302df7a0ed2f"). InnerVolumeSpecName "kube-api-access-947jp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.242248 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "6e076e8f-11b0-48ea-a26c-302df7a0ed2f" (UID: "6e076e8f-11b0-48ea-a26c-302df7a0ed2f"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.341540 4954 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.341615 4954 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.341628 4954 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-lock\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.341637 4954 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-cache\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.341649 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-947jp\" (UniqueName: \"kubernetes.io/projected/6e076e8f-11b0-48ea-a26c-302df7a0ed2f-kube-api-access-947jp\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.357709 4954 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.447564 4954 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.583838 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-d4vfs_2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8/ovs-vswitchd/0.log" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.584790 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.652319 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c94w8\" (UniqueName: \"kubernetes.io/projected/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-kube-api-access-c94w8\") pod \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.652461 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-scripts\") pod \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.652509 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-lib\") pod \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.652567 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-log\") pod \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.652594 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-etc-ovs\") pod \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.652642 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-run\") pod \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\" (UID: \"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8\") " Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.652653 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-lib" (OuterVolumeSpecName: "var-lib") pod "2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" (UID: "2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.652683 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" (UID: "2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8"). InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.652694 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-log" (OuterVolumeSpecName: "var-log") pod "2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" (UID: "2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8"). InnerVolumeSpecName "var-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.652784 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-run" (OuterVolumeSpecName: "var-run") pod "2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" (UID: "2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.653003 4954 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-lib\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.653031 4954 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-log\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.653043 4954 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-etc-ovs\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.653055 4954 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.653641 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-scripts" (OuterVolumeSpecName: "scripts") pod "2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" (UID: "2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.657600 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-kube-api-access-c94w8" (OuterVolumeSpecName: "kube-api-access-c94w8") pod "2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" (UID: "2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8"). InnerVolumeSpecName "kube-api-access-c94w8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.754730 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c94w8\" (UniqueName: \"kubernetes.io/projected/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-kube-api-access-c94w8\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:14 crc kubenswrapper[4954]: I1128 16:37:14.754776 4954 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:15 crc kubenswrapper[4954]: I1128 16:37:15.177507 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-d4vfs_2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8/ovs-vswitchd/0.log" Nov 28 16:37:15 crc kubenswrapper[4954]: I1128 16:37:15.179194 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-d4vfs" Nov 28 16:37:15 crc kubenswrapper[4954]: I1128 16:37:15.179201 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-d4vfs" event={"ID":"2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8","Type":"ContainerDied","Data":"47b82663b31676ac614d5a632c30543c1167fc1670b1affa47084d09aabd00bf"} Nov 28 16:37:15 crc kubenswrapper[4954]: I1128 16:37:15.179223 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 28 16:37:15 crc kubenswrapper[4954]: I1128 16:37:15.179759 4954 scope.go:117] "RemoveContainer" containerID="7159a194542184c4a2c2a5ef778ac6fd51273667bd4d60f3a2ad450c670647dc" Nov 28 16:37:15 crc kubenswrapper[4954]: I1128 16:37:15.215428 4954 scope.go:117] "RemoveContainer" containerID="1f2e078ecc13522ae7c5c154c39330743bf59c81be420994d580b71d0e902d90" Nov 28 16:37:15 crc kubenswrapper[4954]: I1128 16:37:15.216560 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 28 16:37:15 crc kubenswrapper[4954]: I1128 16:37:15.244326 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Nov 28 16:37:15 crc kubenswrapper[4954]: I1128 16:37:15.244544 4954 scope.go:117] "RemoveContainer" containerID="9f05ff47002e77f6711c2164ddba94188c5ecac38bf234177c69c0542ddbe01f" Nov 28 16:37:15 crc kubenswrapper[4954]: I1128 16:37:15.251185 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-d4vfs"] Nov 28 16:37:15 crc kubenswrapper[4954]: I1128 16:37:15.257075 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-d4vfs"] Nov 28 16:37:15 crc kubenswrapper[4954]: I1128 16:37:15.864262 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" path="/var/lib/kubelet/pods/2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8/volumes" Nov 28 16:37:15 crc kubenswrapper[4954]: I1128 16:37:15.865174 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" path="/var/lib/kubelet/pods/6e076e8f-11b0-48ea-a26c-302df7a0ed2f/volumes" Nov 28 16:37:19 crc kubenswrapper[4954]: I1128 16:37:19.623202 4954 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod43af6ac7-70c5-43ff-84a0-0f6b6159ae66"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod43af6ac7-70c5-43ff-84a0-0f6b6159ae66] : Timed out while waiting for systemd to remove kubepods-besteffort-pod43af6ac7_70c5_43ff_84a0_0f6b6159ae66.slice" Nov 28 16:37:19 crc kubenswrapper[4954]: E1128 16:37:19.623612 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod43af6ac7-70c5-43ff-84a0-0f6b6159ae66] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod43af6ac7-70c5-43ff-84a0-0f6b6159ae66] : Timed out while waiting for systemd to remove kubepods-besteffort-pod43af6ac7_70c5_43ff_84a0_0f6b6159ae66.slice" pod="openstack/nova-cell1-conductor-0" podUID="43af6ac7-70c5-43ff-84a0-0f6b6159ae66" Nov 28 16:37:20 crc kubenswrapper[4954]: I1128 16:37:20.255247 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:37:20 crc kubenswrapper[4954]: I1128 16:37:20.299157 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:37:20 crc kubenswrapper[4954]: I1128 16:37:20.303923 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:37:21 crc kubenswrapper[4954]: I1128 16:37:21.865894 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43af6ac7-70c5-43ff-84a0-0f6b6159ae66" path="/var/lib/kubelet/pods/43af6ac7-70c5-43ff-84a0-0f6b6159ae66/volumes" Nov 28 16:37:32 crc kubenswrapper[4954]: I1128 16:37:32.482114 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:37:32 crc kubenswrapper[4954]: I1128 16:37:32.482624 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:37:32 crc kubenswrapper[4954]: I1128 16:37:32.482700 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:37:32 crc kubenswrapper[4954]: I1128 16:37:32.483938 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:37:32 crc kubenswrapper[4954]: I1128 16:37:32.484015 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352" gracePeriod=600 Nov 28 16:37:32 crc kubenswrapper[4954]: E1128 16:37:32.605146 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:37:33 crc kubenswrapper[4954]: I1128 16:37:33.387447 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352" exitCode=0 Nov 28 16:37:33 crc kubenswrapper[4954]: I1128 16:37:33.387506 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352"} Nov 28 16:37:33 crc kubenswrapper[4954]: I1128 
16:37:33.387563 4954 scope.go:117] "RemoveContainer" containerID="63d17d14b7b387446e6790d2542f732de1df551f140ddd4727c4084d5078e4ad" Nov 28 16:37:33 crc kubenswrapper[4954]: I1128 16:37:33.388252 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352" Nov 28 16:37:33 crc kubenswrapper[4954]: E1128 16:37:33.388683 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:37:47 crc kubenswrapper[4954]: I1128 16:37:47.861264 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352" Nov 28 16:37:47 crc kubenswrapper[4954]: E1128 16:37:47.862035 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:38:01 crc kubenswrapper[4954]: I1128 16:38:01.856881 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352" Nov 28 16:38:01 crc kubenswrapper[4954]: E1128 16:38:01.857706 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:38:02 crc kubenswrapper[4954]: I1128 16:38:02.338343 4954 scope.go:117] "RemoveContainer" containerID="b9a5897d5f8e65f42720138f7ef57bcd5702772168bdecf361e4be0f16626b95" Nov 28 16:38:02 crc kubenswrapper[4954]: I1128 16:38:02.365088 4954 scope.go:117] "RemoveContainer" containerID="0ed6f2d737022bca1d619301fdbfeda02b49d72ac4c9728e5a5e8d61013a751f" Nov 28 16:38:02 crc kubenswrapper[4954]: I1128 16:38:02.391640 4954 scope.go:117] "RemoveContainer" containerID="a8372f58c68e6bbdd280dfc7db7108a88ab136fc4bc2e5c65170e347000c679f" Nov 28 16:38:02 crc kubenswrapper[4954]: I1128 16:38:02.427455 4954 scope.go:117] "RemoveContainer" containerID="64fd4fcf3a2617a8546b352438f8e3e0a582c3c419306e2e045c132b0817e838" Nov 28 16:38:02 crc kubenswrapper[4954]: I1128 16:38:02.454451 4954 scope.go:117] "RemoveContainer" containerID="7d6fe30a2f91cd279d988851b8d6ef64b6b98f0bd3cfe2f1f5f96da4fdaecd36" Nov 28 16:38:02 crc kubenswrapper[4954]: I1128 16:38:02.493257 4954 scope.go:117] "RemoveContainer" containerID="7d88064efa113b13270a380653c1ca5ca6b16375b3af64a054f3dfe5666ce593" Nov 28 16:38:02 crc kubenswrapper[4954]: I1128 16:38:02.513138 4954 scope.go:117] "RemoveContainer" containerID="b9df2fdeddb1068c516718c64131c970593e818b9cd77028270f8bd7c828f26c" Nov 28 16:38:02 crc kubenswrapper[4954]: I1128 16:38:02.531475 4954 scope.go:117] "RemoveContainer" 
containerID="8e4a60a0c9ebe4259702f5bf21ad46c42f85291c4afce54389154b16e2fc0736" Nov 28 16:38:02 crc kubenswrapper[4954]: I1128 16:38:02.548253 4954 scope.go:117] "RemoveContainer" containerID="5fa26db3f297ab8d0dc997e2fdc6ffd39edf2b0f78a79277102da75c5bcef787" Nov 28 16:38:15 crc kubenswrapper[4954]: I1128 16:38:15.856135 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352" Nov 28 16:38:15 crc kubenswrapper[4954]: E1128 16:38:15.858168 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:38:26 crc kubenswrapper[4954]: I1128 16:38:26.856074 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352" Nov 28 16:38:26 crc kubenswrapper[4954]: E1128 16:38:26.856960 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:38:37 crc kubenswrapper[4954]: I1128 16:38:37.867042 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352" Nov 28 16:38:37 crc kubenswrapper[4954]: E1128 16:38:37.868338 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:38:52 crc kubenswrapper[4954]: I1128 16:38:52.855638 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352" Nov 28 16:38:52 crc kubenswrapper[4954]: E1128 16:38:52.856361 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:39:02 crc kubenswrapper[4954]: I1128 16:39:02.758002 4954 scope.go:117] "RemoveContainer" containerID="a2bca9bc4ada3b3fc6bc0382c9d6662693c39fd5f31277e3ff70afcea99f052c" Nov 28 16:39:02 crc kubenswrapper[4954]: I1128 16:39:02.782274 4954 scope.go:117] "RemoveContainer" containerID="dcc789be8b7c6143065659690f57fb8fb47dcf73a3d5b9f4d2416eef216a83d6" Nov 28 16:39:02 crc kubenswrapper[4954]: I1128 16:39:02.809174 4954 scope.go:117] "RemoveContainer" containerID="d21ef8c65b44b65b7b352925f975d0e931485db1bbb827c59654bc000dd7fc7f" Nov 28 16:39:02 crc kubenswrapper[4954]: I1128 16:39:02.828506 4954 
scope.go:117] "RemoveContainer" containerID="c7d6a657408296b360af63d4c2d29f78837207a3fc468b6abbbb429d313f11c9" Nov 28 16:39:02 crc kubenswrapper[4954]: I1128 16:39:02.848984 4954 scope.go:117] "RemoveContainer" containerID="c2b37521bc0b4d9273f142d48f21d904914bf907956e7cd8621e25815eee2dd4" Nov 28 16:39:02 crc kubenswrapper[4954]: I1128 16:39:02.906316 4954 scope.go:117] "RemoveContainer" containerID="201e26515a6013ab930eca8cf3032c3b5c434f99841737467e7187c6b2390519" Nov 28 16:39:02 crc kubenswrapper[4954]: I1128 16:39:02.945147 4954 scope.go:117] "RemoveContainer" containerID="f7eddcd9f20ba7f9f2147ed350c853521c58d6d15dcfafe774c3baf1dca0680d" Nov 28 16:39:02 crc kubenswrapper[4954]: I1128 16:39:02.967939 4954 scope.go:117] "RemoveContainer" containerID="12f438b6bdc2aca45e92761c09020e8d83c9a7918f7f3aa86649cd411ca7a04c" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.003894 4954 scope.go:117] "RemoveContainer" containerID="aa502d58af9315bcf5906e11b75470ef748ed5deffc2e4f87f6a86e7decf16e6" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.020573 4954 scope.go:117] "RemoveContainer" containerID="b3f9dee958ec0b21d8a91a7dab2c06039dfd85f545f18ca89dd9084c1a7e3b93" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.037435 4954 scope.go:117] "RemoveContainer" containerID="a2014dcb5213ccabf2e5a08d052c48b1fa541345bc969ed78a02b8acbd0b836e" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.058569 4954 scope.go:117] "RemoveContainer" containerID="5bb02d057581590a07be4f6c68ca4784ff007688bf3a7b3a1009e0c9d6023107" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.079303 4954 scope.go:117] "RemoveContainer" containerID="e140efb0c7b25f4ec739eacce54c1dd0ea255bfae35614850ec69f60dbf6c5dc" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.100442 4954 scope.go:117] "RemoveContainer" containerID="41ade0bb5410fb26da6929cbab355527e23f47061fb8a9b3b265c2dba9186585" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.124119 4954 scope.go:117] "RemoveContainer" containerID="f3f394c2aab82db3f2e4a7ff59593262ff98b913c3e8a66fa7b1fc990d8e2ded" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.143328 4954 scope.go:117] "RemoveContainer" containerID="d6384cb554ea3695e28b1473c8d2c8cabce3a920e913bc674060bf33f2b8c9a0" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.162483 4954 scope.go:117] "RemoveContainer" containerID="70ce2a58db6b12e9c1165d80ed4370774b1cb1db85fb532b8fc97a52c67cbe79" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.184990 4954 scope.go:117] "RemoveContainer" containerID="9c6096808e547acb9a0bfe8d3ba36953837f47d196dc23225e34634802657a07" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.237585 4954 scope.go:117] "RemoveContainer" containerID="3b2e7c6f172b6a2ff64b3c07fb45f6a3eac2e679a49cb30045322a947be9c0a2" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.256664 4954 scope.go:117] "RemoveContainer" containerID="a3de1c50e569442c4d137eb6c4f01fa275602e3c8780f037bca31766cae6424c" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.271838 4954 scope.go:117] "RemoveContainer" containerID="ec66bc366041d8589f9f9c599f24a864c0e0bb563e8e92223216a3166545a6a1" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.292027 4954 scope.go:117] "RemoveContainer" containerID="aa31d4b7c23c133e9bd78b541ca96d27bc06d352ed360aed9ac2990e051d421b" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.330817 4954 scope.go:117] "RemoveContainer" containerID="3a8c57cddf7184457522309c1f75ea85035dd861fa97da4dc3572e601c38586a" Nov 28 16:39:03 crc kubenswrapper[4954]: 
I1128 16:39:03.350421 4954 scope.go:117] "RemoveContainer" containerID="9675109c04b083255a1cea8272db303a0e47b5af1a63b40b8104a6ec81ac39f4" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.383894 4954 scope.go:117] "RemoveContainer" containerID="3ea77e957a4e39b75500e11e0d178d493dd4efdd5e75e1095f6f7266e8af1383" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.415373 4954 scope.go:117] "RemoveContainer" containerID="e269517f2a950366081e2a4332d4c243079436ed72f1864b1a8bba98e71728ad" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.434375 4954 scope.go:117] "RemoveContainer" containerID="975205d87b97524a1fbe645abb8c4c0762a091a627b07f886282ee646cab8c88" Nov 28 16:39:03 crc kubenswrapper[4954]: I1128 16:39:03.856190 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352" Nov 28 16:39:03 crc kubenswrapper[4954]: E1128 16:39:03.856957 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:39:14 crc kubenswrapper[4954]: I1128 16:39:14.856755 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352" Nov 28 16:39:14 crc kubenswrapper[4954]: E1128 16:39:14.857690 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:39:27 crc kubenswrapper[4954]: I1128 16:39:27.864351 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352" Nov 28 16:39:27 crc kubenswrapper[4954]: E1128 16:39:27.865147 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:39:42 crc kubenswrapper[4954]: I1128 16:39:42.855765 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352" Nov 28 16:39:42 crc kubenswrapper[4954]: E1128 16:39:42.856470 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:39:57 crc kubenswrapper[4954]: I1128 16:39:57.860831 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352" Nov 28 
Nov 28 16:40:03 crc kubenswrapper[4954]: I1128 16:40:03.632522 4954 scope.go:117] "RemoveContainer" containerID="2ef96eee105583bb4f5aff04b8ac6bdfa71044142eddd069aee2fd7df3cab725"
Nov 28 16:40:03 crc kubenswrapper[4954]: I1128 16:40:03.656140 4954 scope.go:117] "RemoveContainer" containerID="b9815c657f848312db17e79942f5eddf0b33ee714edd4d18a3accefb78385c5d"
Nov 28 16:40:03 crc kubenswrapper[4954]: I1128 16:40:03.677426 4954 scope.go:117] "RemoveContainer" containerID="52deb0917a2ec0317872f4eeacd25fd59c248a2c1d2a87b43ad82bac7ff9de0b"
Nov 28 16:40:03 crc kubenswrapper[4954]: I1128 16:40:03.703742 4954 scope.go:117] "RemoveContainer" containerID="98b35779e547ce4c6fa5ddd4466450c48cf21079fb3271d4d7874c2a5254e844"
Nov 28 16:40:03 crc kubenswrapper[4954]: I1128 16:40:03.733292 4954 scope.go:117] "RemoveContainer" containerID="25d180f3bd81c9004a4db6ad4e61d48011cac275e60bfe32015be950a41aedcd"
Nov 28 16:40:03 crc kubenswrapper[4954]: I1128 16:40:03.756410 4954 scope.go:117] "RemoveContainer" containerID="d5248dbf6c49a21e5ab30bb4fdda938ceedcde3b2dae112a6d10da5d1ac67592"
Nov 28 16:40:03 crc kubenswrapper[4954]: I1128 16:40:03.781418 4954 scope.go:117] "RemoveContainer" containerID="14738597839ebf173901848778ebcbc2200b247d2d7cda2691a12638166bcddc"
Nov 28 16:40:03 crc kubenswrapper[4954]: I1128 16:40:03.848022 4954 scope.go:117] "RemoveContainer" containerID="009a5d1b2959b5c543787965c35edf048d0545676eeab27f33be2b63101529d1"
Nov 28 16:40:03 crc kubenswrapper[4954]: I1128 16:40:03.874786 4954 scope.go:117] "RemoveContainer" containerID="8add70c010322f7f578f7c468da2668563f210dfc294ebf9f41c56210d6165fb"
Nov 28 16:40:03 crc kubenswrapper[4954]: I1128 16:40:03.893672 4954 scope.go:117] "RemoveContainer" containerID="b32195ea2b6bb28ee8316a1b9b4bf35d1d04d3b7ae1a048a81a1ccfdd4512e24"
Nov 28 16:40:03 crc kubenswrapper[4954]: I1128 16:40:03.911588 4954 scope.go:117] "RemoveContainer" containerID="786c11653ae6fc057189678b362be2013431f0eae8133efcb4e6222525e8f2c6"
Nov 28 16:40:03 crc kubenswrapper[4954]: I1128 16:40:03.935646 4954 scope.go:117] "RemoveContainer" containerID="f657b195bba8965fc3348f96e5b3055b8c89ded434fec2bb372b073bc85deee0"
Nov 28 16:40:03 crc kubenswrapper[4954]: I1128 16:40:03.975090 4954 scope.go:117] "RemoveContainer" containerID="6537c4c8cd8dd3fab3e5160d3fe8c38e05653b1a2fb44a373fda2650bb61ce42"
Nov 28 16:40:03 crc kubenswrapper[4954]: I1128 16:40:03.994872 4954 scope.go:117] "RemoveContainer" containerID="97b634ca797866be1255f1100d6abda787c78438108b953d4c36c125fda1b7b9"
Nov 28 16:40:04 crc kubenswrapper[4954]: I1128 16:40:04.018044 4954 scope.go:117] "RemoveContainer" containerID="3fd64b82b333a2706f84c6fbd560eff95ef6f361783870f68150a1b52899e83e"
Nov 28 16:40:04 crc kubenswrapper[4954]: I1128 16:40:04.034119 4954 scope.go:117] "RemoveContainer" containerID="9790d31bac29981012dc4eb21b9dc17ee5a29b86264289bf2b4f88e66b611652"
Nov 28 16:40:12 crc kubenswrapper[4954]: I1128 16:40:12.856002 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352"
Nov 28 16:40:12 crc kubenswrapper[4954]: E1128 16:40:12.856546 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079"
Nov 28 16:40:25 crc kubenswrapper[4954]: I1128 16:40:25.855978 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352"
Nov 28 16:40:25 crc kubenswrapper[4954]: E1128 16:40:25.856682 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079"
Nov 28 16:40:38 crc kubenswrapper[4954]: I1128 16:40:38.857494 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352"
Nov 28 16:40:38 crc kubenswrapper[4954]: E1128 16:40:38.858154 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079"
Nov 28 16:40:52 crc kubenswrapper[4954]: I1128 16:40:52.855748 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352"
Nov 28 16:40:52 crc kubenswrapper[4954]: E1128 16:40:52.856584 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079"
Nov 28 16:41:04 crc kubenswrapper[4954]: I1128 16:41:04.198456 4954 scope.go:117] "RemoveContainer" containerID="314adb3fa6d873c20f516e93149e8e59ed337d97dff41180c073d8bff231d8d9"
Nov 28 16:41:04 crc kubenswrapper[4954]: I1128 16:41:04.268301 4954 scope.go:117] "RemoveContainer" containerID="b10557bf3df3409a81c72de26c7fe710e7c9b5f93df08097a1eabd4e26aa933a"
Nov 28 16:41:04 crc kubenswrapper[4954]: I1128 16:41:04.312392 4954 scope.go:117] "RemoveContainer" containerID="b5f872f3abeb204d638458c08cdab2c3af846f6d414097df69868f6e72c5d4c3"
Nov 28 16:41:04 crc kubenswrapper[4954]: I1128 16:41:04.337832 4954 scope.go:117] "RemoveContainer" containerID="8c6a6aea7a9dab1c90db663a53190b3d385f2ed88fd26fba749f3549c251570f"
Nov 28 16:41:04 crc kubenswrapper[4954]: I1128 16:41:04.370065 4954 scope.go:117] "RemoveContainer" containerID="64d4cc73b002f732d7801401e3ff287905c1abfeedbf8f510d4caf3a46f41b10"
Nov 28 16:41:04 crc kubenswrapper[4954]: I1128 16:41:04.399825 4954 scope.go:117] "RemoveContainer" containerID="24f751e3e2c8d7d2258b66a8a4a677d4acc17490e7914d661b0d60c02f205516"
Nov 28 16:41:04 crc kubenswrapper[4954]: I1128 16:41:04.416107 4954 scope.go:117] "RemoveContainer" containerID="0a58c23a2643ee01442026f1cd5c2931589d7ded1516de8835e9e562824e613e"
Nov 28 16:41:04 crc kubenswrapper[4954]: I1128 16:41:04.432415 4954 scope.go:117] "RemoveContainer" containerID="bf5f47ec2adaca701e2261bfe9e2ba603aeebd57c0748bfc08cf9e1207069dde"
Nov 28 16:41:04 crc kubenswrapper[4954]: I1128 16:41:04.449026 4954 scope.go:117] "RemoveContainer" containerID="755de0385bf4e11b5c8e1c31c08371669daa12dda25d66b8c68b992fd6205979"
Nov 28 16:41:07 crc kubenswrapper[4954]: I1128 16:41:07.862384 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352"
Nov 28 16:41:07 crc kubenswrapper[4954]: E1128 16:41:07.862641 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079"
Nov 28 16:41:20 crc kubenswrapper[4954]: I1128 16:41:20.857003 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352"
Nov 28 16:41:20 crc kubenswrapper[4954]: E1128 16:41:20.857823 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079"
Nov 28 16:41:35 crc kubenswrapper[4954]: I1128 16:41:35.856936 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352"
Nov 28 16:41:35 crc kubenswrapper[4954]: E1128 16:41:35.858035 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079"
Nov 28 16:41:50 crc kubenswrapper[4954]: I1128 16:41:50.857036 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352"
Nov 28 16:41:50 crc kubenswrapper[4954]: E1128 16:41:50.858332 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079"
Nov 28 16:42:02 crc kubenswrapper[4954]: I1128 16:42:02.855673 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352"
Nov 28 16:42:02 crc kubenswrapper[4954]: E1128 16:42:02.856444 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079"
Nov 28 16:42:04 crc kubenswrapper[4954]: I1128 16:42:04.528008 4954 scope.go:117] "RemoveContainer" containerID="190064bf648d40792650ba482e7ac25d6d7c2fabc259baf1d0b1b45c5abca205"
Nov 28 16:42:04 crc kubenswrapper[4954]: I1128 16:42:04.552950 4954 scope.go:117] "RemoveContainer" containerID="509442becd9f8224ea575b681dffcd00ba195d6a23b535824e28058798c86086"
Nov 28 16:42:04 crc kubenswrapper[4954]: I1128 16:42:04.583831 4954 scope.go:117] "RemoveContainer" containerID="f3b3be1f06f6ac14c8e5f8001d2e6742a30e971699d14e2b691a416c75f5f699"
Nov 28 16:42:04 crc kubenswrapper[4954]: I1128 16:42:04.611242 4954 scope.go:117] "RemoveContainer" containerID="12e64b79d883c0960f7d869751df2054fc61218ac1ea6063c9a82354936f8e17"
Nov 28 16:42:04 crc kubenswrapper[4954]: I1128 16:42:04.632606 4954 scope.go:117] "RemoveContainer" containerID="a4137e9f8aff40189511ca5d877f9f9ea4572d1ba1b973060ea1671fe11c5291"
Nov 28 16:42:04 crc kubenswrapper[4954]: I1128 16:42:04.651381 4954 scope.go:117] "RemoveContainer" containerID="eec27ebb646c765f0cde3b186b7adb59c251c51c9a65684667cfbc3c2314c2a3"
Nov 28 16:42:04 crc kubenswrapper[4954]: I1128 16:42:04.669260 4954 scope.go:117] "RemoveContainer" containerID="f395a2e9a2f791976be90eae75233a52a92153cf8e478c7bc3edfa54acb7f9ea"
Nov 28 16:42:04 crc kubenswrapper[4954]: I1128 16:42:04.710518 4954 scope.go:117] "RemoveContainer" containerID="a90a636e9b99d45f8039d9676ed4cf0cbaf5b8af82689455191e468cfb83faa4"
Nov 28 16:42:04 crc kubenswrapper[4954]: I1128 16:42:04.727795 4954 scope.go:117] "RemoveContainer" containerID="722b392e14e483b0e2f1d174ef2d136d04550f8d796a74666ddf9f8253b0b8ec"
Nov 28 16:42:15 crc kubenswrapper[4954]: I1128 16:42:15.856094 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352"
Nov 28 16:42:15 crc kubenswrapper[4954]: E1128 16:42:15.856835 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079"
Nov 28 16:42:30 crc kubenswrapper[4954]: I1128 16:42:30.856913 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352"
Nov 28 16:42:30 crc kubenswrapper[4954]: E1128 16:42:30.858401 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079"
Nov 28 16:42:42 crc kubenswrapper[4954]: I1128 16:42:42.856048 4954 scope.go:117] "RemoveContainer"
containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352" Nov 28 16:42:43 crc kubenswrapper[4954]: I1128 16:42:43.101972 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"d50a8efc4b1b81210ca4bb44dd5e48836ad1dfd0a22eb35b09931efb376480fa"} Nov 28 16:43:04 crc kubenswrapper[4954]: I1128 16:43:04.820748 4954 scope.go:117] "RemoveContainer" containerID="5ee583633bf711261f9be82f4e55e1ba9821071c17a3ab4a989453611d8fafec" Nov 28 16:43:04 crc kubenswrapper[4954]: I1128 16:43:04.857492 4954 scope.go:117] "RemoveContainer" containerID="8abf93f87a5b33cbda36466498d343caaa5ecf640b48334af8af9fc2fef2dc20" Nov 28 16:43:04 crc kubenswrapper[4954]: I1128 16:43:04.884261 4954 scope.go:117] "RemoveContainer" containerID="1606ea4346525e54b196374f42bc0cc24f176a431b2adc531d6ed468b2112dea" Nov 28 16:43:04 crc kubenswrapper[4954]: I1128 16:43:04.912198 4954 scope.go:117] "RemoveContainer" containerID="eba850d500c1b9eff48d4c4269433dc00620e092eabe63ce9cb2cc328b7bd894" Nov 28 16:43:04 crc kubenswrapper[4954]: I1128 16:43:04.937636 4954 scope.go:117] "RemoveContainer" containerID="3e12a3d3b371177e96edcb8710ee4e74f64dc99510cabdd5643a86ea0b95b060" Nov 28 16:43:04 crc kubenswrapper[4954]: I1128 16:43:04.967393 4954 scope.go:117] "RemoveContainer" containerID="4086bd643882fbb57adba19a323b9ec69c8f4e9d979fee602b638028e29a8143" Nov 28 16:43:04 crc kubenswrapper[4954]: I1128 16:43:04.986600 4954 scope.go:117] "RemoveContainer" containerID="d6c73c953daac7374bd820d28af1e1380b668dc95777a7b94a7a994654c5848a" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.773690 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4p6pr"] Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774562 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78bd4de4-5601-4771-b15c-c240e097519b" containerName="placement-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774577 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="78bd4de4-5601-4771-b15c-c240e097519b" containerName="placement-log" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774588 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f8d636b-e07a-46b1-91b3-899a395e3ce5" containerName="nova-cell0-conductor-conductor" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774595 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f8d636b-e07a-46b1-91b3-899a395e3ce5" containerName="nova-cell0-conductor-conductor" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774609 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" containerName="glance-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774616 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" containerName="glance-log" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774630 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fb4ddd8-d914-431c-a39f-28a0c6b45354" containerName="mysql-bootstrap" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774638 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fb4ddd8-d914-431c-a39f-28a0c6b45354" containerName="mysql-bootstrap" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774645 4954 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="1b7b0248-e1e6-40f5-9edb-0dbece5f12ae" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774653 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b7b0248-e1e6-40f5-9edb-0dbece5f12ae" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774661 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="ceilometer-central-agent" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774669 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="ceilometer-central-agent" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774677 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9023ad7d-6621-4ed8-aec4-bd1d0db53088" containerName="barbican-worker-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774684 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="9023ad7d-6621-4ed8-aec4-bd1d0db53088" containerName="barbican-worker-log" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774694 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-expirer" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774701 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-expirer" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774709 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f8f01be-65c4-4a56-85e5-1b46ba5804ee" containerName="nova-scheduler-scheduler" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774717 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f8f01be-65c4-4a56-85e5-1b46ba5804ee" containerName="nova-scheduler-scheduler" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774727 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="sg-core" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774736 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="sg-core" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774745 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70613221-3087-4dc3-9f41-86eb6fe88041" containerName="barbican-keystone-listener-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774752 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="70613221-3087-4dc3-9f41-86eb6fe88041" containerName="barbican-keystone-listener-log" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774763 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af0c7199-9850-4553-b644-371c7e305443" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774769 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="af0c7199-9850-4553-b644-371c7e305443" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774779 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d348de7-2e67-41df-8d59-4692491ea145" containerName="galera" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774786 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d348de7-2e67-41df-8d59-4692491ea145" containerName="galera" Nov 28 16:44:58 crc 
kubenswrapper[4954]: E1128 16:44:58.774798 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="892117aa-d306-4a1d-bf6e-b203b6337537" containerName="ovsdbserver-nb" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774806 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="892117aa-d306-4a1d-bf6e-b203b6337537" containerName="ovsdbserver-nb" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774815 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-replicator" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774822 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-replicator" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774833 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4439c3d-c90f-4b13-87a4-01c211cec875" containerName="probe" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774842 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4439c3d-c90f-4b13-87a4-01c211cec875" containerName="probe" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774851 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="642e0138-17d8-47e0-a67d-51a758291f7e" containerName="nova-metadata-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774857 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="642e0138-17d8-47e0-a67d-51a758291f7e" containerName="nova-metadata-log" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774868 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52dbe85a-ccd3-4527-af8d-17ad9748d3c4" containerName="dnsmasq-dns" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774874 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="52dbe85a-ccd3-4527-af8d-17ad9748d3c4" containerName="dnsmasq-dns" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774887 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="762dc5ac-a8d3-4b91-806c-079e568530b1" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774893 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="762dc5ac-a8d3-4b91-806c-079e568530b1" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774905 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acf16584-ed76-41dc-955b-17e86a277627" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774910 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="acf16584-ed76-41dc-955b-17e86a277627" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774918 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3022fb04-fc0f-44b1-9f97-3893aa4bdd68" containerName="proxy-httpd" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774923 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="3022fb04-fc0f-44b1-9f97-3893aa4bdd68" containerName="proxy-httpd" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774934 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-replicator" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774940 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" 
containerName="object-replicator" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774950 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="swift-recon-cron" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774955 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="swift-recon-cron" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774966 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fb4ddd8-d914-431c-a39f-28a0c6b45354" containerName="galera" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774974 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fb4ddd8-d914-431c-a39f-28a0c6b45354" containerName="galera" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774982 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3022fb04-fc0f-44b1-9f97-3893aa4bdd68" containerName="proxy-server" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.774987 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="3022fb04-fc0f-44b1-9f97-3893aa4bdd68" containerName="proxy-server" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.774996 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovsdb-server-init" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775001 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovsdb-server-init" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775011 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c362e30-8109-411f-9f89-21c7c28da6c2" containerName="glance-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775017 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c362e30-8109-411f-9f89-21c7c28da6c2" containerName="glance-log" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775026 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-server" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775031 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-server" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775038 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-updater" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775044 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-updater" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775051 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ce804c9-edba-4404-9099-4c0f102aa1b2" containerName="neutron-api" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775056 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ce804c9-edba-4404-9099-4c0f102aa1b2" containerName="neutron-api" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775067 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4439c3d-c90f-4b13-87a4-01c211cec875" containerName="cinder-scheduler" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775072 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4439c3d-c90f-4b13-87a4-01c211cec875" 
containerName="cinder-scheduler" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775080 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d8b87c9-4dd0-431c-a555-49141762763a" containerName="cinder-api-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775085 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d8b87c9-4dd0-431c-a555-49141762763a" containerName="cinder-api-log" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775094 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a252e1a-c96a-4f98-b24e-b224fedf344c" containerName="rabbitmq" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775099 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a252e1a-c96a-4f98-b24e-b224fedf344c" containerName="rabbitmq" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775107 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca81ef12-eb13-468e-81fc-0fdf6aba8830" containerName="setup-container" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775113 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca81ef12-eb13-468e-81fc-0fdf6aba8830" containerName="setup-container" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775124 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45c8821a-baab-4e3c-8ffb-f4fe71722666" containerName="barbican-api-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775130 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="45c8821a-baab-4e3c-8ffb-f4fe71722666" containerName="barbican-api-log" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775140 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-server" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775145 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-server" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775180 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43af6ac7-70c5-43ff-84a0-0f6b6159ae66" containerName="nova-cell1-conductor-conductor" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775187 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="43af6ac7-70c5-43ff-84a0-0f6b6159ae66" containerName="nova-cell1-conductor-conductor" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775197 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa" containerName="openstack-network-exporter" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775204 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa" containerName="openstack-network-exporter" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775212 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-auditor" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775217 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-auditor" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775228 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c381eb3-f466-40b7-a962-6782db85678c" containerName="kube-state-metrics" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775233 4954 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="9c381eb3-f466-40b7-a962-6782db85678c" containerName="kube-state-metrics" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775242 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="642e0138-17d8-47e0-a67d-51a758291f7e" containerName="nova-metadata-metadata" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775248 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="642e0138-17d8-47e0-a67d-51a758291f7e" containerName="nova-metadata-metadata" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775256 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bc7ec75-0145-47f2-8193-28c8f17b572a" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775261 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bc7ec75-0145-47f2-8193-28c8f17b572a" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775268 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9023ad7d-6621-4ed8-aec4-bd1d0db53088" containerName="barbican-worker" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775273 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="9023ad7d-6621-4ed8-aec4-bd1d0db53088" containerName="barbican-worker" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775279 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a252e1a-c96a-4f98-b24e-b224fedf344c" containerName="setup-container" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775285 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a252e1a-c96a-4f98-b24e-b224fedf344c" containerName="setup-container" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775291 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afe4302a-413f-48e1-90a9-3f1178e5c6f7" containerName="memcached" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775296 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="afe4302a-413f-48e1-90a9-3f1178e5c6f7" containerName="memcached" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775303 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ce804c9-edba-4404-9099-4c0f102aa1b2" containerName="neutron-httpd" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775309 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ce804c9-edba-4404-9099-4c0f102aa1b2" containerName="neutron-httpd" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775315 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6abe9424-aabe-4ae6-8032-79b92583d31d" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775321 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6abe9424-aabe-4ae6-8032-79b92583d31d" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775327 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-reaper" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775333 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-reaper" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775342 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d8b87c9-4dd0-431c-a555-49141762763a" containerName="cinder-api" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 
16:44:58.775347 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d8b87c9-4dd0-431c-a555-49141762763a" containerName="cinder-api" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775355 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70613221-3087-4dc3-9f41-86eb6fe88041" containerName="barbican-keystone-listener" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775361 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="70613221-3087-4dc3-9f41-86eb6fe88041" containerName="barbican-keystone-listener" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775370 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d348de7-2e67-41df-8d59-4692491ea145" containerName="mysql-bootstrap" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775375 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d348de7-2e67-41df-8d59-4692491ea145" containerName="mysql-bootstrap" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775385 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caa7b547-96ec-4119-87b1-fa14697ba9d1" containerName="keystone-api" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775391 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="caa7b547-96ec-4119-87b1-fa14697ba9d1" containerName="keystone-api" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775399 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52dbe85a-ccd3-4527-af8d-17ad9748d3c4" containerName="init" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775406 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="52dbe85a-ccd3-4527-af8d-17ad9748d3c4" containerName="init" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775416 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf74e9b3-c272-47a3-bd81-1fae19e39236" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775422 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf74e9b3-c272-47a3-bd81-1fae19e39236" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775432 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" containerName="ovsdbserver-sb" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775438 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" containerName="ovsdbserver-sb" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775446 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovs-vswitchd" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775451 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovs-vswitchd" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775457 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45c8821a-baab-4e3c-8ffb-f4fe71722666" containerName="barbican-api" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775463 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="45c8821a-baab-4e3c-8ffb-f4fe71722666" containerName="barbican-api" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775470 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" containerName="nova-api-api" Nov 28 16:44:58 crc 
kubenswrapper[4954]: I1128 16:44:58.775476 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" containerName="nova-api-api" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775484 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" containerName="ovn-controller" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775490 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" containerName="ovn-controller" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775498 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78bd4de4-5601-4771-b15c-c240e097519b" containerName="placement-api" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775503 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="78bd4de4-5601-4771-b15c-c240e097519b" containerName="placement-api" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775511 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bff6e67-d9f4-4952-992d-1fa362d23a5c" containerName="ovn-northd" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775516 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bff6e67-d9f4-4952-992d-1fa362d23a5c" containerName="ovn-northd" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775538 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="ceilometer-notification-agent" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775544 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="ceilometer-notification-agent" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775555 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovsdb-server" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775561 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovsdb-server" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775571 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-server" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775577 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-server" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775586 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="proxy-httpd" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775592 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="proxy-httpd" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775600 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-updater" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775605 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-updater" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775611 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" containerName="glance-httpd" Nov 28 
16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775617 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" containerName="glance-httpd" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775625 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c362e30-8109-411f-9f89-21c7c28da6c2" containerName="glance-httpd" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775630 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c362e30-8109-411f-9f89-21c7c28da6c2" containerName="glance-httpd" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775639 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="892117aa-d306-4a1d-bf6e-b203b6337537" containerName="openstack-network-exporter" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775645 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="892117aa-d306-4a1d-bf6e-b203b6337537" containerName="openstack-network-exporter" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775654 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-replicator" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775660 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-replicator" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775668 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca81ef12-eb13-468e-81fc-0fdf6aba8830" containerName="rabbitmq" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775673 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca81ef12-eb13-468e-81fc-0fdf6aba8830" containerName="rabbitmq" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775681 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-auditor" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775686 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-auditor" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775693 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-auditor" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775699 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-auditor" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775705 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d887f697-ad36-44e6-8011-878d0a78b3bf" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775711 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d887f697-ad36-44e6-8011-878d0a78b3bf" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775719 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" containerName="openstack-network-exporter" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775725 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" containerName="openstack-network-exporter" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775733 4954 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="8bff6e67-d9f4-4952-992d-1fa362d23a5c" containerName="openstack-network-exporter" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775739 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bff6e67-d9f4-4952-992d-1fa362d23a5c" containerName="openstack-network-exporter" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775757 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" containerName="nova-api-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775767 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" containerName="nova-api-log" Nov 28 16:44:58 crc kubenswrapper[4954]: E1128 16:44:58.775778 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="rsync" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775784 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="rsync" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775905 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf74e9b3-c272-47a3-bd81-1fae19e39236" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775916 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4e36ba5-0aa1-4e30-99b7-cd3d8c38fab1" containerName="ovn-controller" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775927 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="642e0138-17d8-47e0-a67d-51a758291f7e" containerName="nova-metadata-metadata" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775934 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-expirer" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775944 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bff6e67-d9f4-4952-992d-1fa362d23a5c" containerName="ovn-northd" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775954 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b7b0248-e1e6-40f5-9edb-0dbece5f12ae" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775962 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovs-vswitchd" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775972 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f08fb1c-8d18-4b77-bbaf-adcf2c5bc2a8" containerName="ovsdb-server" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775980 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="acf16584-ed76-41dc-955b-17e86a277627" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775988 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-auditor" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.775994 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c362e30-8109-411f-9f89-21c7c28da6c2" containerName="glance-httpd" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776001 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="sg-core" Nov 28 16:44:58 crc kubenswrapper[4954]: 
I1128 16:44:58.776010 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="3022fb04-fc0f-44b1-9f97-3893aa4bdd68" containerName="proxy-httpd" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776015 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="70613221-3087-4dc3-9f41-86eb6fe88041" containerName="barbican-keystone-listener-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776025 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" containerName="glance-httpd" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776031 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="762dc5ac-a8d3-4b91-806c-079e568530b1" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776040 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="swift-recon-cron" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776048 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-replicator" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776054 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="ceilometer-central-agent" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776063 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="43af6ac7-70c5-43ff-84a0-0f6b6159ae66" containerName="nova-cell1-conductor-conductor" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776071 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-server" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776080 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="52dbe85a-ccd3-4527-af8d-17ad9748d3c4" containerName="dnsmasq-dns" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776090 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="78bd4de4-5601-4771-b15c-c240e097519b" containerName="placement-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776097 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ce804c9-edba-4404-9099-4c0f102aa1b2" containerName="neutron-httpd" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776104 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-replicator" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776112 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="9023ad7d-6621-4ed8-aec4-bd1d0db53088" containerName="barbican-worker" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776121 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="d887f697-ad36-44e6-8011-878d0a78b3bf" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776132 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bff6e67-d9f4-4952-992d-1fa362d23a5c" containerName="openstack-network-exporter" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776139 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="70613221-3087-4dc3-9f41-86eb6fe88041" containerName="barbican-keystone-listener" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776147 4954 
memory_manager.go:354] "RemoveStaleState removing state" podUID="892117aa-d306-4a1d-bf6e-b203b6337537" containerName="ovsdbserver-nb" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776156 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="9023ad7d-6621-4ed8-aec4-bd1d0db53088" containerName="barbican-worker-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776161 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca81ef12-eb13-468e-81fc-0fdf6aba8830" containerName="rabbitmq" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776169 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" containerName="nova-api-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776177 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c362e30-8109-411f-9f89-21c7c28da6c2" containerName="glance-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776185 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a252e1a-c96a-4f98-b24e-b224fedf344c" containerName="rabbitmq" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776193 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cf746a5-7be5-4e2d-a7f8-18878e2a41a5" containerName="nova-api-api" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776199 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="91dcac80-4f79-4fbb-81c5-4fd24cb69ccf" containerName="glance-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776204 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d8b87c9-4dd0-431c-a555-49141762763a" containerName="cinder-api-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776212 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-server" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776219 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4439c3d-c90f-4b13-87a4-01c211cec875" containerName="probe" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776227 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="45c8821a-baab-4e3c-8ffb-f4fe71722666" containerName="barbican-api-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776233 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="3022fb04-fc0f-44b1-9f97-3893aa4bdd68" containerName="proxy-server" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776241 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c381eb3-f466-40b7-a962-6782db85678c" containerName="kube-state-metrics" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776251 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-reaper" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776258 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4439c3d-c90f-4b13-87a4-01c211cec875" containerName="cinder-scheduler" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776265 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="892117aa-d306-4a1d-bf6e-b203b6337537" containerName="openstack-network-exporter" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776274 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f8d636b-e07a-46b1-91b3-899a395e3ce5" 
containerName="nova-cell0-conductor-conductor" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776283 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ce804c9-edba-4404-9099-4c0f102aa1b2" containerName="neutron-api" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776291 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fb4ddd8-d914-431c-a39f-28a0c6b45354" containerName="galera" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776298 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="642e0138-17d8-47e0-a67d-51a758291f7e" containerName="nova-metadata-log" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776305 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-updater" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776313 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-auditor" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776320 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d8b87c9-4dd0-431c-a555-49141762763a" containerName="cinder-api" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776329 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-server" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776334 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="af0c7199-9850-4553-b644-371c7e305443" containerName="mariadb-account-delete" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776343 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="proxy-httpd" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776352 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="78bd4de4-5601-4771-b15c-c240e097519b" containerName="placement-api" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776359 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="account-auditor" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776368 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="container-updater" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776375 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="caa7b547-96ec-4119-87b1-fa14697ba9d1" containerName="keystone-api" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776383 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="afe4302a-413f-48e1-90a9-3f1178e5c6f7" containerName="memcached" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776405 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="object-replicator" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776414 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" containerName="openstack-network-exporter" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776422 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e076e8f-11b0-48ea-a26c-302df7a0ed2f" containerName="rsync" Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776430 4954 
memory_manager.go:354] "RemoveStaleState removing state" podUID="041e6ce3-19e7-41a1-ad5e-78d1ad12e6fa" containerName="openstack-network-exporter"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776437 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f8f01be-65c4-4a56-85e5-1b46ba5804ee" containerName="nova-scheduler-scheduler"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776444 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="6abe9424-aabe-4ae6-8032-79b92583d31d" containerName="mariadb-account-delete"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776452 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b2bd02f-8f58-4ec3-8ac6-66fc9dc394c6" containerName="ovsdbserver-sb"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776460 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="45c8821a-baab-4e3c-8ffb-f4fe71722666" containerName="barbican-api"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776467 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cef279d-a444-456d-8add-1998974a6e08" containerName="ceilometer-notification-agent"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776474 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bc7ec75-0145-47f2-8193-28c8f17b572a" containerName="mariadb-account-delete"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.776480 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d348de7-2e67-41df-8d59-4692491ea145" containerName="galera"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.777849 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.799259 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4p6pr"]
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.885125 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-catalog-content\") pod \"redhat-operators-4p6pr\" (UID: \"7bd008e2-e6ca-421d-8adf-d112c0eadbc5\") " pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.885268 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-utilities\") pod \"redhat-operators-4p6pr\" (UID: \"7bd008e2-e6ca-421d-8adf-d112c0eadbc5\") " pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.885320 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvrfh\" (UniqueName: \"kubernetes.io/projected/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-kube-api-access-dvrfh\") pod \"redhat-operators-4p6pr\" (UID: \"7bd008e2-e6ca-421d-8adf-d112c0eadbc5\") " pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.986698 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-utilities\") pod \"redhat-operators-4p6pr\" (UID: \"7bd008e2-e6ca-421d-8adf-d112c0eadbc5\") " pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.987323 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvrfh\" (UniqueName: \"kubernetes.io/projected/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-kube-api-access-dvrfh\") pod \"redhat-operators-4p6pr\" (UID: \"7bd008e2-e6ca-421d-8adf-d112c0eadbc5\") " pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.987355 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-utilities\") pod \"redhat-operators-4p6pr\" (UID: \"7bd008e2-e6ca-421d-8adf-d112c0eadbc5\") " pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.987365 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-catalog-content\") pod \"redhat-operators-4p6pr\" (UID: \"7bd008e2-e6ca-421d-8adf-d112c0eadbc5\") " pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:44:58 crc kubenswrapper[4954]: I1128 16:44:58.987818 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-catalog-content\") pod \"redhat-operators-4p6pr\" (UID: \"7bd008e2-e6ca-421d-8adf-d112c0eadbc5\") " pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:44:59 crc kubenswrapper[4954]: I1128 16:44:59.012188 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvrfh\" (UniqueName: \"kubernetes.io/projected/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-kube-api-access-dvrfh\") pod \"redhat-operators-4p6pr\" (UID: \"7bd008e2-e6ca-421d-8adf-d112c0eadbc5\") " pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:44:59 crc kubenswrapper[4954]: I1128 16:44:59.108516 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:44:59 crc kubenswrapper[4954]: I1128 16:44:59.573517 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4p6pr"]
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.146095 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"]
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.147128 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.149338 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.150387 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.154301 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"]
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.205895 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2831b507-d16f-4c5d-8197-75ca53f2867e-secret-volume\") pod \"collect-profiles-29405805-mzb4g\" (UID: \"2831b507-d16f-4c5d-8197-75ca53f2867e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.206031 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4v9ch\" (UniqueName: \"kubernetes.io/projected/2831b507-d16f-4c5d-8197-75ca53f2867e-kube-api-access-4v9ch\") pod \"collect-profiles-29405805-mzb4g\" (UID: \"2831b507-d16f-4c5d-8197-75ca53f2867e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.206119 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2831b507-d16f-4c5d-8197-75ca53f2867e-config-volume\") pod \"collect-profiles-29405805-mzb4g\" (UID: \"2831b507-d16f-4c5d-8197-75ca53f2867e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.262173 4954 generic.go:334] "Generic (PLEG): container finished" podID="7bd008e2-e6ca-421d-8adf-d112c0eadbc5" containerID="80b31aa7bb3b638d3359c987613b9e98c4a8cc2559ac328faa8ab36b52d733f7" exitCode=0
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.262241 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p6pr" event={"ID":"7bd008e2-e6ca-421d-8adf-d112c0eadbc5","Type":"ContainerDied","Data":"80b31aa7bb3b638d3359c987613b9e98c4a8cc2559ac328faa8ab36b52d733f7"}
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.320106 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p6pr" event={"ID":"7bd008e2-e6ca-421d-8adf-d112c0eadbc5","Type":"ContainerStarted","Data":"6809df890a4567d3615b21407ae771cd7344339d2bcee7d0809a1be0cedeb968"}
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.322166 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4v9ch\" (UniqueName: \"kubernetes.io/projected/2831b507-d16f-4c5d-8197-75ca53f2867e-kube-api-access-4v9ch\") pod \"collect-profiles-29405805-mzb4g\" (UID: \"2831b507-d16f-4c5d-8197-75ca53f2867e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.322218 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2831b507-d16f-4c5d-8197-75ca53f2867e-config-volume\") pod \"collect-profiles-29405805-mzb4g\" (UID: \"2831b507-d16f-4c5d-8197-75ca53f2867e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.322276 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2831b507-d16f-4c5d-8197-75ca53f2867e-secret-volume\") pod \"collect-profiles-29405805-mzb4g\" (UID: \"2831b507-d16f-4c5d-8197-75ca53f2867e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.323808 4954 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.323932 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2831b507-d16f-4c5d-8197-75ca53f2867e-config-volume\") pod \"collect-profiles-29405805-mzb4g\" (UID: \"2831b507-d16f-4c5d-8197-75ca53f2867e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.330643 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2831b507-d16f-4c5d-8197-75ca53f2867e-secret-volume\") pod \"collect-profiles-29405805-mzb4g\" (UID: \"2831b507-d16f-4c5d-8197-75ca53f2867e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.358637 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4v9ch\" (UniqueName: \"kubernetes.io/projected/2831b507-d16f-4c5d-8197-75ca53f2867e-kube-api-access-4v9ch\") pod \"collect-profiles-29405805-mzb4g\" (UID: \"2831b507-d16f-4c5d-8197-75ca53f2867e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.463451 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"
Nov 28 16:45:00 crc kubenswrapper[4954]: W1128 16:45:00.883736 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2831b507_d16f_4c5d_8197_75ca53f2867e.slice/crio-57254be8d158bb3a87e20fbffd393b5183d7bc66ec9966f06f2c44f396f0f17e WatchSource:0}: Error finding container 57254be8d158bb3a87e20fbffd393b5183d7bc66ec9966f06f2c44f396f0f17e: Status 404 returned error can't find the container with id 57254be8d158bb3a87e20fbffd393b5183d7bc66ec9966f06f2c44f396f0f17e
Nov 28 16:45:00 crc kubenswrapper[4954]: I1128 16:45:00.892032 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"]
Nov 28 16:45:01 crc kubenswrapper[4954]: I1128 16:45:01.271160 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p6pr" event={"ID":"7bd008e2-e6ca-421d-8adf-d112c0eadbc5","Type":"ContainerStarted","Data":"bed6375f7dcf19781c17ed5b7d4d42eb8c548cfa6be4f2e1ee2ef8309c451643"}
Nov 28 16:45:01 crc kubenswrapper[4954]: I1128 16:45:01.273424 4954 generic.go:334] "Generic (PLEG): container finished" podID="2831b507-d16f-4c5d-8197-75ca53f2867e" containerID="6edceb02528b52f6b5506c92c459d2a23cff7512dc8475f5694bc820a6757719" exitCode=0
Nov 28 16:45:01 crc kubenswrapper[4954]: I1128 16:45:01.273507 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g" event={"ID":"2831b507-d16f-4c5d-8197-75ca53f2867e","Type":"ContainerDied","Data":"6edceb02528b52f6b5506c92c459d2a23cff7512dc8475f5694bc820a6757719"}
Nov 28 16:45:01 crc kubenswrapper[4954]: I1128 16:45:01.273578 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g" event={"ID":"2831b507-d16f-4c5d-8197-75ca53f2867e","Type":"ContainerStarted","Data":"57254be8d158bb3a87e20fbffd393b5183d7bc66ec9966f06f2c44f396f0f17e"}
Nov 28 16:45:02 crc kubenswrapper[4954]: I1128 16:45:02.283991 4954 generic.go:334] "Generic (PLEG): container finished" podID="7bd008e2-e6ca-421d-8adf-d112c0eadbc5" containerID="bed6375f7dcf19781c17ed5b7d4d42eb8c548cfa6be4f2e1ee2ef8309c451643" exitCode=0
Nov 28 16:45:02 crc kubenswrapper[4954]: I1128 16:45:02.284078 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p6pr" event={"ID":"7bd008e2-e6ca-421d-8adf-d112c0eadbc5","Type":"ContainerDied","Data":"bed6375f7dcf19781c17ed5b7d4d42eb8c548cfa6be4f2e1ee2ef8309c451643"}
Nov 28 16:45:02 crc kubenswrapper[4954]: I1128 16:45:02.481411 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:45:02 crc kubenswrapper[4954]: I1128 16:45:02.481470 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:45:02 crc kubenswrapper[4954]: I1128 16:45:02.584955 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"
Nov 28 16:45:02 crc kubenswrapper[4954]: I1128 16:45:02.755879 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2831b507-d16f-4c5d-8197-75ca53f2867e-secret-volume\") pod \"2831b507-d16f-4c5d-8197-75ca53f2867e\" (UID: \"2831b507-d16f-4c5d-8197-75ca53f2867e\") "
Nov 28 16:45:02 crc kubenswrapper[4954]: I1128 16:45:02.755986 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2831b507-d16f-4c5d-8197-75ca53f2867e-config-volume\") pod \"2831b507-d16f-4c5d-8197-75ca53f2867e\" (UID: \"2831b507-d16f-4c5d-8197-75ca53f2867e\") "
Nov 28 16:45:02 crc kubenswrapper[4954]: I1128 16:45:02.756142 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4v9ch\" (UniqueName: \"kubernetes.io/projected/2831b507-d16f-4c5d-8197-75ca53f2867e-kube-api-access-4v9ch\") pod \"2831b507-d16f-4c5d-8197-75ca53f2867e\" (UID: \"2831b507-d16f-4c5d-8197-75ca53f2867e\") "
Nov 28 16:45:02 crc kubenswrapper[4954]: I1128 16:45:02.756956 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2831b507-d16f-4c5d-8197-75ca53f2867e-config-volume" (OuterVolumeSpecName: "config-volume") pod "2831b507-d16f-4c5d-8197-75ca53f2867e" (UID: "2831b507-d16f-4c5d-8197-75ca53f2867e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:45:02 crc kubenswrapper[4954]: I1128 16:45:02.761859 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2831b507-d16f-4c5d-8197-75ca53f2867e-kube-api-access-4v9ch" (OuterVolumeSpecName: "kube-api-access-4v9ch") pod "2831b507-d16f-4c5d-8197-75ca53f2867e" (UID: "2831b507-d16f-4c5d-8197-75ca53f2867e"). InnerVolumeSpecName "kube-api-access-4v9ch". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:45:02 crc kubenswrapper[4954]: I1128 16:45:02.763401 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2831b507-d16f-4c5d-8197-75ca53f2867e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2831b507-d16f-4c5d-8197-75ca53f2867e" (UID: "2831b507-d16f-4c5d-8197-75ca53f2867e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:45:02 crc kubenswrapper[4954]: I1128 16:45:02.857519 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4v9ch\" (UniqueName: \"kubernetes.io/projected/2831b507-d16f-4c5d-8197-75ca53f2867e-kube-api-access-4v9ch\") on node \"crc\" DevicePath \"\""
Nov 28 16:45:02 crc kubenswrapper[4954]: I1128 16:45:02.857574 4954 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2831b507-d16f-4c5d-8197-75ca53f2867e-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 28 16:45:02 crc kubenswrapper[4954]: I1128 16:45:02.857585 4954 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2831b507-d16f-4c5d-8197-75ca53f2867e-config-volume\") on node \"crc\" DevicePath \"\""
Nov 28 16:45:03 crc kubenswrapper[4954]: I1128 16:45:03.293644 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"
Nov 28 16:45:03 crc kubenswrapper[4954]: I1128 16:45:03.293635 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g" event={"ID":"2831b507-d16f-4c5d-8197-75ca53f2867e","Type":"ContainerDied","Data":"57254be8d158bb3a87e20fbffd393b5183d7bc66ec9966f06f2c44f396f0f17e"}
Nov 28 16:45:03 crc kubenswrapper[4954]: I1128 16:45:03.293789 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="57254be8d158bb3a87e20fbffd393b5183d7bc66ec9966f06f2c44f396f0f17e"
Nov 28 16:45:03 crc kubenswrapper[4954]: I1128 16:45:03.296969 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p6pr" event={"ID":"7bd008e2-e6ca-421d-8adf-d112c0eadbc5","Type":"ContainerStarted","Data":"c60daadd9acd99bd87ca60868ddb54cc588fb6f9fec0461a711eeae4efde1deb"}
Nov 28 16:45:03 crc kubenswrapper[4954]: I1128 16:45:03.322055 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4p6pr" podStartSLOduration=2.937684745 podStartE2EDuration="5.3220321s" podCreationTimestamp="2025-11-28 16:44:58 +0000 UTC" firstStartedPulling="2025-11-28 16:45:00.323550577 +0000 UTC m=+2053.715219118" lastFinishedPulling="2025-11-28 16:45:02.707897932 +0000 UTC m=+2056.099566473" observedRunningTime="2025-11-28 16:45:03.317193398 +0000 UTC m=+2056.708861939" watchObservedRunningTime="2025-11-28 16:45:03.3220321 +0000 UTC m=+2056.713700651"
Nov 28 16:45:03 crc kubenswrapper[4954]: I1128 16:45:03.658875 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl"]
Nov 28 16:45:03 crc kubenswrapper[4954]: I1128 16:45:03.663432 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-q4ftl"]
Nov 28 16:45:03 crc kubenswrapper[4954]: I1128 16:45:03.867153 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f18abd7-b65d-47d8-af3e-44008988873e" path="/var/lib/kubelet/pods/2f18abd7-b65d-47d8-af3e-44008988873e/volumes"
Nov 28 16:45:05 crc kubenswrapper[4954]: I1128 16:45:05.136470 4954 scope.go:117] "RemoveContainer" containerID="dd92ee1cad6c2ab85fcebd8218d98749c796a250198d22fb99d55a004071a561"
Nov 28 16:45:09 crc kubenswrapper[4954]: I1128 16:45:09.109590 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:45:09 crc kubenswrapper[4954]: I1128 16:45:09.110172 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:45:09 crc kubenswrapper[4954]: I1128 16:45:09.155053 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:45:09 crc kubenswrapper[4954]: I1128 16:45:09.386114 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:45:09 crc kubenswrapper[4954]: I1128 16:45:09.428962 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4p6pr"]
Nov 28 16:45:11 crc kubenswrapper[4954]: I1128 16:45:11.358410 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4p6pr" podUID="7bd008e2-e6ca-421d-8adf-d112c0eadbc5" containerName="registry-server" containerID="cri-o://c60daadd9acd99bd87ca60868ddb54cc588fb6f9fec0461a711eeae4efde1deb" gracePeriod=2
Nov 28 16:45:14 crc kubenswrapper[4954]: I1128 16:45:14.383105 4954 generic.go:334] "Generic (PLEG): container finished" podID="7bd008e2-e6ca-421d-8adf-d112c0eadbc5" containerID="c60daadd9acd99bd87ca60868ddb54cc588fb6f9fec0461a711eeae4efde1deb" exitCode=0
Nov 28 16:45:14 crc kubenswrapper[4954]: I1128 16:45:14.383142 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p6pr" event={"ID":"7bd008e2-e6ca-421d-8adf-d112c0eadbc5","Type":"ContainerDied","Data":"c60daadd9acd99bd87ca60868ddb54cc588fb6f9fec0461a711eeae4efde1deb"}
Nov 28 16:45:14 crc kubenswrapper[4954]: I1128 16:45:14.464382 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:45:14 crc kubenswrapper[4954]: I1128 16:45:14.630670 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-catalog-content\") pod \"7bd008e2-e6ca-421d-8adf-d112c0eadbc5\" (UID: \"7bd008e2-e6ca-421d-8adf-d112c0eadbc5\") "
Nov 28 16:45:14 crc kubenswrapper[4954]: I1128 16:45:14.630740 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-utilities\") pod \"7bd008e2-e6ca-421d-8adf-d112c0eadbc5\" (UID: \"7bd008e2-e6ca-421d-8adf-d112c0eadbc5\") "
Nov 28 16:45:14 crc kubenswrapper[4954]: I1128 16:45:14.630763 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dvrfh\" (UniqueName: \"kubernetes.io/projected/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-kube-api-access-dvrfh\") pod \"7bd008e2-e6ca-421d-8adf-d112c0eadbc5\" (UID: \"7bd008e2-e6ca-421d-8adf-d112c0eadbc5\") "
Nov 28 16:45:14 crc kubenswrapper[4954]: I1128 16:45:14.632327 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-utilities" (OuterVolumeSpecName: "utilities") pod "7bd008e2-e6ca-421d-8adf-d112c0eadbc5" (UID: "7bd008e2-e6ca-421d-8adf-d112c0eadbc5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:45:14 crc kubenswrapper[4954]: I1128 16:45:14.636978 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-kube-api-access-dvrfh" (OuterVolumeSpecName: "kube-api-access-dvrfh") pod "7bd008e2-e6ca-421d-8adf-d112c0eadbc5" (UID: "7bd008e2-e6ca-421d-8adf-d112c0eadbc5"). InnerVolumeSpecName "kube-api-access-dvrfh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:45:14 crc kubenswrapper[4954]: I1128 16:45:14.732783 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 16:45:14 crc kubenswrapper[4954]: I1128 16:45:14.732814 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dvrfh\" (UniqueName: \"kubernetes.io/projected/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-kube-api-access-dvrfh\") on node \"crc\" DevicePath \"\""
Nov 28 16:45:14 crc kubenswrapper[4954]: I1128 16:45:14.799940 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7bd008e2-e6ca-421d-8adf-d112c0eadbc5" (UID: "7bd008e2-e6ca-421d-8adf-d112c0eadbc5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:45:14 crc kubenswrapper[4954]: I1128 16:45:14.833948 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bd008e2-e6ca-421d-8adf-d112c0eadbc5-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:45:15 crc kubenswrapper[4954]: I1128 16:45:15.395863 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4p6pr" event={"ID":"7bd008e2-e6ca-421d-8adf-d112c0eadbc5","Type":"ContainerDied","Data":"6809df890a4567d3615b21407ae771cd7344339d2bcee7d0809a1be0cedeb968"}
Nov 28 16:45:15 crc kubenswrapper[4954]: I1128 16:45:15.396149 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4p6pr"
Nov 28 16:45:15 crc kubenswrapper[4954]: I1128 16:45:15.396681 4954 scope.go:117] "RemoveContainer" containerID="c60daadd9acd99bd87ca60868ddb54cc588fb6f9fec0461a711eeae4efde1deb"
Nov 28 16:45:15 crc kubenswrapper[4954]: I1128 16:45:15.437693 4954 scope.go:117] "RemoveContainer" containerID="bed6375f7dcf19781c17ed5b7d4d42eb8c548cfa6be4f2e1ee2ef8309c451643"
Nov 28 16:45:15 crc kubenswrapper[4954]: I1128 16:45:15.479381 4954 scope.go:117] "RemoveContainer" containerID="80b31aa7bb3b638d3359c987613b9e98c4a8cc2559ac328faa8ab36b52d733f7"
Nov 28 16:45:15 crc kubenswrapper[4954]: I1128 16:45:15.479411 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4p6pr"]
Nov 28 16:45:15 crc kubenswrapper[4954]: I1128 16:45:15.487210 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4p6pr"]
Nov 28 16:45:15 crc kubenswrapper[4954]: E1128 16:45:15.590953 4954 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7bd008e2_e6ca_421d_8adf_d112c0eadbc5.slice/crio-6809df890a4567d3615b21407ae771cd7344339d2bcee7d0809a1be0cedeb968\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7bd008e2_e6ca_421d_8adf_d112c0eadbc5.slice\": RecentStats: unable to find data in memory cache]"
Nov 28 16:45:15 crc kubenswrapper[4954]: I1128 16:45:15.865128 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bd008e2-e6ca-421d-8adf-d112c0eadbc5" path="/var/lib/kubelet/pods/7bd008e2-e6ca-421d-8adf-d112c0eadbc5/volumes"
Nov 28 16:45:32 crc kubenswrapper[4954]: I1128 16:45:32.480783 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:45:32 crc kubenswrapper[4954]: I1128 16:45:32.481677 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.352155 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4c7cj"]
Nov 28 16:45:46 crc kubenswrapper[4954]: E1128 16:45:46.352996 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bd008e2-e6ca-421d-8adf-d112c0eadbc5" containerName="registry-server"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.353010 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bd008e2-e6ca-421d-8adf-d112c0eadbc5" containerName="registry-server"
Nov 28 16:45:46 crc kubenswrapper[4954]: E1128 16:45:46.353034 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bd008e2-e6ca-421d-8adf-d112c0eadbc5" containerName="extract-content"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.353041 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bd008e2-e6ca-421d-8adf-d112c0eadbc5" containerName="extract-content"
Nov 28 16:45:46 crc kubenswrapper[4954]: E1128 16:45:46.353053 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2831b507-d16f-4c5d-8197-75ca53f2867e" containerName="collect-profiles"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.353059 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="2831b507-d16f-4c5d-8197-75ca53f2867e" containerName="collect-profiles"
Nov 28 16:45:46 crc kubenswrapper[4954]: E1128 16:45:46.353073 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bd008e2-e6ca-421d-8adf-d112c0eadbc5" containerName="extract-utilities"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.353078 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bd008e2-e6ca-421d-8adf-d112c0eadbc5" containerName="extract-utilities"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.353206 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="2831b507-d16f-4c5d-8197-75ca53f2867e" containerName="collect-profiles"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.353226 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bd008e2-e6ca-421d-8adf-d112c0eadbc5" containerName="registry-server"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.354228 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.372042 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4c7cj"]
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.507243 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brkbc\" (UniqueName: \"kubernetes.io/projected/31af8111-914e-4844-b376-40f7f0654df0-kube-api-access-brkbc\") pod \"redhat-marketplace-4c7cj\" (UID: \"31af8111-914e-4844-b376-40f7f0654df0\") " pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.507338 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31af8111-914e-4844-b376-40f7f0654df0-catalog-content\") pod \"redhat-marketplace-4c7cj\" (UID: \"31af8111-914e-4844-b376-40f7f0654df0\") " pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.507365 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31af8111-914e-4844-b376-40f7f0654df0-utilities\") pod \"redhat-marketplace-4c7cj\" (UID: \"31af8111-914e-4844-b376-40f7f0654df0\") " pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.609270 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brkbc\" (UniqueName: \"kubernetes.io/projected/31af8111-914e-4844-b376-40f7f0654df0-kube-api-access-brkbc\") pod \"redhat-marketplace-4c7cj\" (UID: \"31af8111-914e-4844-b376-40f7f0654df0\") " pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.609350 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31af8111-914e-4844-b376-40f7f0654df0-catalog-content\") pod \"redhat-marketplace-4c7cj\" (UID: \"31af8111-914e-4844-b376-40f7f0654df0\") " pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.609370 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31af8111-914e-4844-b376-40f7f0654df0-utilities\") pod \"redhat-marketplace-4c7cj\" (UID: \"31af8111-914e-4844-b376-40f7f0654df0\") " pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.609822 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31af8111-914e-4844-b376-40f7f0654df0-utilities\") pod \"redhat-marketplace-4c7cj\" (UID: \"31af8111-914e-4844-b376-40f7f0654df0\") " pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.609926 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31af8111-914e-4844-b376-40f7f0654df0-catalog-content\") pod \"redhat-marketplace-4c7cj\" (UID: \"31af8111-914e-4844-b376-40f7f0654df0\") " pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.642656 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brkbc\" (UniqueName: \"kubernetes.io/projected/31af8111-914e-4844-b376-40f7f0654df0-kube-api-access-brkbc\") pod \"redhat-marketplace-4c7cj\" (UID: \"31af8111-914e-4844-b376-40f7f0654df0\") " pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:46 crc kubenswrapper[4954]: I1128 16:45:46.675335 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:47 crc kubenswrapper[4954]: I1128 16:45:47.227520 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4c7cj"]
Nov 28 16:45:47 crc kubenswrapper[4954]: I1128 16:45:47.643182 4954 generic.go:334] "Generic (PLEG): container finished" podID="31af8111-914e-4844-b376-40f7f0654df0" containerID="99204886873b2d8665d63043f8fa6961d29d8eb37be054d2eccfd5a5048a2201" exitCode=0
Nov 28 16:45:47 crc kubenswrapper[4954]: I1128 16:45:47.643574 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4c7cj" event={"ID":"31af8111-914e-4844-b376-40f7f0654df0","Type":"ContainerDied","Data":"99204886873b2d8665d63043f8fa6961d29d8eb37be054d2eccfd5a5048a2201"}
Nov 28 16:45:47 crc kubenswrapper[4954]: I1128 16:45:47.643607 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4c7cj" event={"ID":"31af8111-914e-4844-b376-40f7f0654df0","Type":"ContainerStarted","Data":"4f4fbdec375aa908b9d3ee3ab4dc02e7440b7607a460af2e057ed21bfee446c6"}
Nov 28 16:45:49 crc kubenswrapper[4954]: I1128 16:45:49.661651 4954 generic.go:334] "Generic (PLEG): container finished" podID="31af8111-914e-4844-b376-40f7f0654df0" containerID="0d1ee300cf67717f7a93a970d2e87fd408c50efb59ae6941900c2aa9a2febca4" exitCode=0
Nov 28 16:45:49 crc kubenswrapper[4954]: I1128 16:45:49.661755 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4c7cj" event={"ID":"31af8111-914e-4844-b376-40f7f0654df0","Type":"ContainerDied","Data":"0d1ee300cf67717f7a93a970d2e87fd408c50efb59ae6941900c2aa9a2febca4"}
Nov 28 16:45:50 crc kubenswrapper[4954]: I1128 16:45:50.673274 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4c7cj" event={"ID":"31af8111-914e-4844-b376-40f7f0654df0","Type":"ContainerStarted","Data":"0b6ea5050f08e4abafa02503677e51b6560c80e7ac31ec6159b58b5c5fd64f55"}
Nov 28 16:45:50 crc kubenswrapper[4954]: I1128 16:45:50.700875 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4c7cj" podStartSLOduration=2.050100752 podStartE2EDuration="4.700852409s" podCreationTimestamp="2025-11-28 16:45:46 +0000 UTC" firstStartedPulling="2025-11-28 16:45:47.644954545 +0000 UTC m=+2101.036623086" lastFinishedPulling="2025-11-28 16:45:50.295706202 +0000 UTC m=+2103.687374743" observedRunningTime="2025-11-28 16:45:50.694062545 +0000 UTC m=+2104.085731116" watchObservedRunningTime="2025-11-28 16:45:50.700852409 +0000 UTC m=+2104.092520970"
Nov 28 16:45:56 crc kubenswrapper[4954]: I1128 16:45:56.677232 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:56 crc kubenswrapper[4954]: I1128 16:45:56.677775 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:56 crc kubenswrapper[4954]: I1128 16:45:56.743707 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:56 crc kubenswrapper[4954]: I1128 16:45:56.794468 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:56 crc kubenswrapper[4954]: I1128 16:45:56.980106 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4c7cj"]
Nov 28 16:45:58 crc kubenswrapper[4954]: I1128 16:45:58.732172 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4c7cj" podUID="31af8111-914e-4844-b376-40f7f0654df0" containerName="registry-server" containerID="cri-o://0b6ea5050f08e4abafa02503677e51b6560c80e7ac31ec6159b58b5c5fd64f55" gracePeriod=2
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.179345 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.200736 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31af8111-914e-4844-b376-40f7f0654df0-utilities\") pod \"31af8111-914e-4844-b376-40f7f0654df0\" (UID: \"31af8111-914e-4844-b376-40f7f0654df0\") "
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.202144 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31af8111-914e-4844-b376-40f7f0654df0-utilities" (OuterVolumeSpecName: "utilities") pod "31af8111-914e-4844-b376-40f7f0654df0" (UID: "31af8111-914e-4844-b376-40f7f0654df0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.302024 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-brkbc\" (UniqueName: \"kubernetes.io/projected/31af8111-914e-4844-b376-40f7f0654df0-kube-api-access-brkbc\") pod \"31af8111-914e-4844-b376-40f7f0654df0\" (UID: \"31af8111-914e-4844-b376-40f7f0654df0\") "
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.302347 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31af8111-914e-4844-b376-40f7f0654df0-catalog-content\") pod \"31af8111-914e-4844-b376-40f7f0654df0\" (UID: \"31af8111-914e-4844-b376-40f7f0654df0\") "
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.302598 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31af8111-914e-4844-b376-40f7f0654df0-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.309082 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31af8111-914e-4844-b376-40f7f0654df0-kube-api-access-brkbc" (OuterVolumeSpecName: "kube-api-access-brkbc") pod "31af8111-914e-4844-b376-40f7f0654df0" (UID: "31af8111-914e-4844-b376-40f7f0654df0"). InnerVolumeSpecName "kube-api-access-brkbc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.322352 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31af8111-914e-4844-b376-40f7f0654df0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "31af8111-914e-4844-b376-40f7f0654df0" (UID: "31af8111-914e-4844-b376-40f7f0654df0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.403739 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-brkbc\" (UniqueName: \"kubernetes.io/projected/31af8111-914e-4844-b376-40f7f0654df0-kube-api-access-brkbc\") on node \"crc\" DevicePath \"\""
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.403782 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31af8111-914e-4844-b376-40f7f0654df0-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.739085 4954 generic.go:334] "Generic (PLEG): container finished" podID="31af8111-914e-4844-b376-40f7f0654df0" containerID="0b6ea5050f08e4abafa02503677e51b6560c80e7ac31ec6159b58b5c5fd64f55" exitCode=0
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.739120 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4c7cj" event={"ID":"31af8111-914e-4844-b376-40f7f0654df0","Type":"ContainerDied","Data":"0b6ea5050f08e4abafa02503677e51b6560c80e7ac31ec6159b58b5c5fd64f55"}
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.739143 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4c7cj" event={"ID":"31af8111-914e-4844-b376-40f7f0654df0","Type":"ContainerDied","Data":"4f4fbdec375aa908b9d3ee3ab4dc02e7440b7607a460af2e057ed21bfee446c6"}
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.739142 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4c7cj"
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.739166 4954 scope.go:117] "RemoveContainer" containerID="0b6ea5050f08e4abafa02503677e51b6560c80e7ac31ec6159b58b5c5fd64f55"
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.778776 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4c7cj"]
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.781035 4954 scope.go:117] "RemoveContainer" containerID="0d1ee300cf67717f7a93a970d2e87fd408c50efb59ae6941900c2aa9a2febca4"
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.788177 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4c7cj"]
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.803269 4954 scope.go:117] "RemoveContainer" containerID="99204886873b2d8665d63043f8fa6961d29d8eb37be054d2eccfd5a5048a2201"
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.827488 4954 scope.go:117] "RemoveContainer" containerID="0b6ea5050f08e4abafa02503677e51b6560c80e7ac31ec6159b58b5c5fd64f55"
Nov 28 16:45:59 crc kubenswrapper[4954]: E1128 16:45:59.827890 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b6ea5050f08e4abafa02503677e51b6560c80e7ac31ec6159b58b5c5fd64f55\": container with ID starting with 0b6ea5050f08e4abafa02503677e51b6560c80e7ac31ec6159b58b5c5fd64f55 not found: ID does not exist" containerID="0b6ea5050f08e4abafa02503677e51b6560c80e7ac31ec6159b58b5c5fd64f55"
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.827926 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b6ea5050f08e4abafa02503677e51b6560c80e7ac31ec6159b58b5c5fd64f55"} err="failed to get container status \"0b6ea5050f08e4abafa02503677e51b6560c80e7ac31ec6159b58b5c5fd64f55\": rpc error: code = NotFound desc = could not find container \"0b6ea5050f08e4abafa02503677e51b6560c80e7ac31ec6159b58b5c5fd64f55\": container with ID starting with 0b6ea5050f08e4abafa02503677e51b6560c80e7ac31ec6159b58b5c5fd64f55 not found: ID does not exist"
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.827947 4954 scope.go:117] "RemoveContainer" containerID="0d1ee300cf67717f7a93a970d2e87fd408c50efb59ae6941900c2aa9a2febca4"
Nov 28 16:45:59 crc kubenswrapper[4954]: E1128 16:45:59.828170 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d1ee300cf67717f7a93a970d2e87fd408c50efb59ae6941900c2aa9a2febca4\": container with ID starting with 0d1ee300cf67717f7a93a970d2e87fd408c50efb59ae6941900c2aa9a2febca4 not found: ID does not exist" containerID="0d1ee300cf67717f7a93a970d2e87fd408c50efb59ae6941900c2aa9a2febca4"
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.828194 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d1ee300cf67717f7a93a970d2e87fd408c50efb59ae6941900c2aa9a2febca4"} err="failed to get container status \"0d1ee300cf67717f7a93a970d2e87fd408c50efb59ae6941900c2aa9a2febca4\": rpc error: code = NotFound desc = could not find container \"0d1ee300cf67717f7a93a970d2e87fd408c50efb59ae6941900c2aa9a2febca4\": container with ID starting with 0d1ee300cf67717f7a93a970d2e87fd408c50efb59ae6941900c2aa9a2febca4 not found: ID does not exist"
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.828232 4954 scope.go:117] "RemoveContainer" containerID="99204886873b2d8665d63043f8fa6961d29d8eb37be054d2eccfd5a5048a2201"
Nov 28 16:45:59 crc kubenswrapper[4954]: E1128 16:45:59.828490 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99204886873b2d8665d63043f8fa6961d29d8eb37be054d2eccfd5a5048a2201\": container with ID starting with 99204886873b2d8665d63043f8fa6961d29d8eb37be054d2eccfd5a5048a2201 not found: ID does not exist" containerID="99204886873b2d8665d63043f8fa6961d29d8eb37be054d2eccfd5a5048a2201"
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.828615 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99204886873b2d8665d63043f8fa6961d29d8eb37be054d2eccfd5a5048a2201"} err="failed to get container status \"99204886873b2d8665d63043f8fa6961d29d8eb37be054d2eccfd5a5048a2201\": rpc error: code = NotFound desc = could not find container \"99204886873b2d8665d63043f8fa6961d29d8eb37be054d2eccfd5a5048a2201\": container with ID starting with 99204886873b2d8665d63043f8fa6961d29d8eb37be054d2eccfd5a5048a2201 not found: ID does not exist"
Nov 28 16:45:59 crc kubenswrapper[4954]: I1128 16:45:59.866337 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31af8111-914e-4844-b376-40f7f0654df0" path="/var/lib/kubelet/pods/31af8111-914e-4844-b376-40f7f0654df0/volumes"
Nov 28 16:46:02 crc kubenswrapper[4954]: I1128 16:46:02.480723 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:46:02 crc kubenswrapper[4954]: I1128 16:46:02.481081 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:46:02 crc kubenswrapper[4954]: I1128 16:46:02.481403 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj"
Nov 28 16:46:02 crc kubenswrapper[4954]: I1128 16:46:02.482026 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d50a8efc4b1b81210ca4bb44dd5e48836ad1dfd0a22eb35b09931efb376480fa"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 16:46:02 crc kubenswrapper[4954]: I1128 16:46:02.482091 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://d50a8efc4b1b81210ca4bb44dd5e48836ad1dfd0a22eb35b09931efb376480fa" gracePeriod=600
Nov 28 16:46:02 crc kubenswrapper[4954]: I1128 16:46:02.765019 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="d50a8efc4b1b81210ca4bb44dd5e48836ad1dfd0a22eb35b09931efb376480fa" exitCode=0
Nov 28 16:46:02 crc kubenswrapper[4954]: I1128 16:46:02.765068 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"d50a8efc4b1b81210ca4bb44dd5e48836ad1dfd0a22eb35b09931efb376480fa"}
Nov 28 16:46:02 crc kubenswrapper[4954]: I1128 16:46:02.765109 4954 scope.go:117] "RemoveContainer" containerID="418d926ed9b2e6ed1f67666714411eeeabc9d0c2a2747b7e7e47be79c0af8352"
Nov 28 16:46:03 crc kubenswrapper[4954]: I1128 16:46:03.777637 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba"}
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.672786 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7sx5c"]
Nov 28 16:46:39 crc kubenswrapper[4954]: E1128 16:46:39.673863 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31af8111-914e-4844-b376-40f7f0654df0" containerName="extract-content"
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.673885 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="31af8111-914e-4844-b376-40f7f0654df0" containerName="extract-content"
Nov 28 16:46:39 crc kubenswrapper[4954]: E1128 16:46:39.673923 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31af8111-914e-4844-b376-40f7f0654df0" containerName="registry-server"
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.673936 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="31af8111-914e-4844-b376-40f7f0654df0" containerName="registry-server"
Nov 28 16:46:39 crc kubenswrapper[4954]: E1128 16:46:39.673969 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31af8111-914e-4844-b376-40f7f0654df0" containerName="extract-utilities"
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.673980 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="31af8111-914e-4844-b376-40f7f0654df0" containerName="extract-utilities"
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.674210 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="31af8111-914e-4844-b376-40f7f0654df0" containerName="registry-server"
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.677363 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.686474 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7sx5c"]
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.820054 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9a3be40-b420-4c69-a92e-f405733ac9c4-catalog-content\") pod \"community-operators-7sx5c\" (UID: \"a9a3be40-b420-4c69-a92e-f405733ac9c4\") " pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.820123 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4m9hh\" (UniqueName: \"kubernetes.io/projected/a9a3be40-b420-4c69-a92e-f405733ac9c4-kube-api-access-4m9hh\") pod \"community-operators-7sx5c\" (UID: \"a9a3be40-b420-4c69-a92e-f405733ac9c4\") " pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.820196 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9a3be40-b420-4c69-a92e-f405733ac9c4-utilities\") pod \"community-operators-7sx5c\" (UID: \"a9a3be40-b420-4c69-a92e-f405733ac9c4\") " pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.922091 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4m9hh\" (UniqueName: \"kubernetes.io/projected/a9a3be40-b420-4c69-a92e-f405733ac9c4-kube-api-access-4m9hh\") pod \"community-operators-7sx5c\" (UID: \"a9a3be40-b420-4c69-a92e-f405733ac9c4\") " pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.922218 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9a3be40-b420-4c69-a92e-f405733ac9c4-utilities\") pod \"community-operators-7sx5c\" (UID: \"a9a3be40-b420-4c69-a92e-f405733ac9c4\") " pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.922281 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9a3be40-b420-4c69-a92e-f405733ac9c4-catalog-content\") pod \"community-operators-7sx5c\" (UID: \"a9a3be40-b420-4c69-a92e-f405733ac9c4\") " pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.922780 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9a3be40-b420-4c69-a92e-f405733ac9c4-utilities\") pod \"community-operators-7sx5c\" (UID: \"a9a3be40-b420-4c69-a92e-f405733ac9c4\") " pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.922837 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9a3be40-b420-4c69-a92e-f405733ac9c4-catalog-content\") pod \"community-operators-7sx5c\" (UID: \"a9a3be40-b420-4c69-a92e-f405733ac9c4\") " pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:39 crc kubenswrapper[4954]: I1128 16:46:39.942541 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4m9hh\" (UniqueName: \"kubernetes.io/projected/a9a3be40-b420-4c69-a92e-f405733ac9c4-kube-api-access-4m9hh\") pod \"community-operators-7sx5c\" (UID: \"a9a3be40-b420-4c69-a92e-f405733ac9c4\") " pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:40 crc kubenswrapper[4954]: I1128 16:46:40.008836 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:40 crc kubenswrapper[4954]: I1128 16:46:40.489591 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7sx5c"]
Nov 28 16:46:41 crc kubenswrapper[4954]: I1128 16:46:41.087041 4954 generic.go:334] "Generic (PLEG): container finished" podID="a9a3be40-b420-4c69-a92e-f405733ac9c4" containerID="95de8eed699a15f73047e0ee4da95e5a6dc4389556dc62064f6a579dbf24082c" exitCode=0
Nov 28 16:46:41 crc kubenswrapper[4954]: I1128 16:46:41.087138 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7sx5c" event={"ID":"a9a3be40-b420-4c69-a92e-f405733ac9c4","Type":"ContainerDied","Data":"95de8eed699a15f73047e0ee4da95e5a6dc4389556dc62064f6a579dbf24082c"}
Nov 28 16:46:41 crc kubenswrapper[4954]: I1128 16:46:41.087380 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7sx5c" event={"ID":"a9a3be40-b420-4c69-a92e-f405733ac9c4","Type":"ContainerStarted","Data":"983094c7b5105ba8c683d715754ca52c39aa77e60dab1bb2228dbab81d1d1b1a"}
Nov 28 16:46:43 crc kubenswrapper[4954]: I1128 16:46:43.105854 4954 generic.go:334] "Generic (PLEG): container finished" podID="a9a3be40-b420-4c69-a92e-f405733ac9c4" containerID="4ecbf17bdc4183fd188af312b74ed988f005d858729f49cd7633b8b3f7c96184" exitCode=0
Nov 28 16:46:43 crc kubenswrapper[4954]: I1128 16:46:43.105949 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7sx5c" event={"ID":"a9a3be40-b420-4c69-a92e-f405733ac9c4","Type":"ContainerDied","Data":"4ecbf17bdc4183fd188af312b74ed988f005d858729f49cd7633b8b3f7c96184"}
Nov 28 16:46:44 crc kubenswrapper[4954]: I1128 16:46:44.119747 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7sx5c" event={"ID":"a9a3be40-b420-4c69-a92e-f405733ac9c4","Type":"ContainerStarted","Data":"1b8c82a09740926ca852dc37255908148d06fb7d118416517ab642c36df08e35"}
Nov 28 16:46:45 crc kubenswrapper[4954]: I1128 16:46:45.855149 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7sx5c" podStartSLOduration=4.302219486 podStartE2EDuration="6.855119377s" podCreationTimestamp="2025-11-28 16:46:39 +0000 UTC" firstStartedPulling="2025-11-28 16:46:41.088369587 +0000 UTC m=+2154.480038148" lastFinishedPulling="2025-11-28 16:46:43.641269488 +0000 UTC m=+2157.032938039" observedRunningTime="2025-11-28 16:46:44.14771559 +0000 UTC m=+2157.539384161" watchObservedRunningTime="2025-11-28 16:46:45.855119377 +0000 UTC m=+2159.246787918"
Nov 28 16:46:45 crc kubenswrapper[4954]: I1128 16:46:45.866413 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-msrxn"]
Nov 28 16:46:45 crc kubenswrapper[4954]: I1128 16:46:45.868187 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-msrxn"
Nov 28 16:46:45 crc kubenswrapper[4954]: I1128 16:46:45.871623 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-msrxn"]
Nov 28 16:46:46 crc kubenswrapper[4954]: I1128 16:46:46.009652 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/871003d1-4abc-4a18-aed7-f905c86ee10a-catalog-content\") pod \"certified-operators-msrxn\" (UID: \"871003d1-4abc-4a18-aed7-f905c86ee10a\") " pod="openshift-marketplace/certified-operators-msrxn"
Nov 28 16:46:46 crc kubenswrapper[4954]: I1128 16:46:46.009765 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/871003d1-4abc-4a18-aed7-f905c86ee10a-utilities\") pod \"certified-operators-msrxn\" (UID: \"871003d1-4abc-4a18-aed7-f905c86ee10a\") " pod="openshift-marketplace/certified-operators-msrxn"
Nov 28 16:46:46 crc kubenswrapper[4954]: I1128 16:46:46.009827 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ssz2\" (UniqueName: \"kubernetes.io/projected/871003d1-4abc-4a18-aed7-f905c86ee10a-kube-api-access-5ssz2\") pod \"certified-operators-msrxn\" (UID: \"871003d1-4abc-4a18-aed7-f905c86ee10a\") " pod="openshift-marketplace/certified-operators-msrxn"
Nov 28 16:46:46 crc kubenswrapper[4954]: I1128 16:46:46.111001 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/871003d1-4abc-4a18-aed7-f905c86ee10a-utilities\") pod \"certified-operators-msrxn\" (UID: \"871003d1-4abc-4a18-aed7-f905c86ee10a\") " pod="openshift-marketplace/certified-operators-msrxn"
Nov 28 16:46:46 crc kubenswrapper[4954]: I1128 16:46:46.111091 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ssz2\" (UniqueName: \"kubernetes.io/projected/871003d1-4abc-4a18-aed7-f905c86ee10a-kube-api-access-5ssz2\") pod \"certified-operators-msrxn\" (UID: \"871003d1-4abc-4a18-aed7-f905c86ee10a\") " pod="openshift-marketplace/certified-operators-msrxn"
Nov 28 16:46:46 crc kubenswrapper[4954]: I1128 16:46:46.111145 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/871003d1-4abc-4a18-aed7-f905c86ee10a-catalog-content\") pod \"certified-operators-msrxn\" (UID: \"871003d1-4abc-4a18-aed7-f905c86ee10a\") " pod="openshift-marketplace/certified-operators-msrxn"
Nov 28 16:46:46 crc kubenswrapper[4954]: I1128 16:46:46.111498 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/871003d1-4abc-4a18-aed7-f905c86ee10a-utilities\") pod \"certified-operators-msrxn\" (UID: \"871003d1-4abc-4a18-aed7-f905c86ee10a\") " pod="openshift-marketplace/certified-operators-msrxn"
Nov 28 16:46:46 crc kubenswrapper[4954]: I1128 16:46:46.111610 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/871003d1-4abc-4a18-aed7-f905c86ee10a-catalog-content\") pod \"certified-operators-msrxn\" (UID: \"871003d1-4abc-4a18-aed7-f905c86ee10a\") " pod="openshift-marketplace/certified-operators-msrxn"
Nov 28 16:46:46 crc kubenswrapper[4954]: I1128 16:46:46.132371 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ssz2\" (UniqueName: \"kubernetes.io/projected/871003d1-4abc-4a18-aed7-f905c86ee10a-kube-api-access-5ssz2\") pod \"certified-operators-msrxn\" (UID: \"871003d1-4abc-4a18-aed7-f905c86ee10a\") " pod="openshift-marketplace/certified-operators-msrxn"
Nov 28 16:46:46 crc kubenswrapper[4954]: I1128 16:46:46.202119 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-msrxn"
Nov 28 16:46:46 crc kubenswrapper[4954]: I1128 16:46:46.650386 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-msrxn"]
Nov 28 16:46:47 crc kubenswrapper[4954]: I1128 16:46:47.146563 4954 generic.go:334] "Generic (PLEG): container finished" podID="871003d1-4abc-4a18-aed7-f905c86ee10a" containerID="45c1363bd1a8efb3ec21c4817217f83fff42657678037b90ef8b38b5521081a3" exitCode=0
Nov 28 16:46:47 crc kubenswrapper[4954]: I1128 16:46:47.146656 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-msrxn" event={"ID":"871003d1-4abc-4a18-aed7-f905c86ee10a","Type":"ContainerDied","Data":"45c1363bd1a8efb3ec21c4817217f83fff42657678037b90ef8b38b5521081a3"}
Nov 28 16:46:47 crc kubenswrapper[4954]: I1128 16:46:47.146918 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-msrxn" event={"ID":"871003d1-4abc-4a18-aed7-f905c86ee10a","Type":"ContainerStarted","Data":"01f23d44a590fd348b8b55a6ca98ba9730621eb8f9111704a42a8c1bd29c0628"}
Nov 28 16:46:49 crc kubenswrapper[4954]: I1128 16:46:49.161743 4954 generic.go:334] "Generic (PLEG): container finished" podID="871003d1-4abc-4a18-aed7-f905c86ee10a" containerID="80e1215225269ad69457e03715ea4416206e500803275ed1eb2d1ab1ec94ae43" exitCode=0
Nov 28 16:46:49 crc kubenswrapper[4954]: I1128 16:46:49.161810 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-msrxn" event={"ID":"871003d1-4abc-4a18-aed7-f905c86ee10a","Type":"ContainerDied","Data":"80e1215225269ad69457e03715ea4416206e500803275ed1eb2d1ab1ec94ae43"}
Nov 28 16:46:50 crc kubenswrapper[4954]: I1128 16:46:50.009354 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:50 crc kubenswrapper[4954]: I1128 16:46:50.009665 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:50 crc kubenswrapper[4954]: I1128 16:46:50.057731 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:50 crc kubenswrapper[4954]: I1128 16:46:50.173711 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-msrxn" event={"ID":"871003d1-4abc-4a18-aed7-f905c86ee10a","Type":"ContainerStarted","Data":"28f7fd2431d08a9bbedd003e6bc53e210f9731c24ebfa1337068edec4bfec229"}
Nov 28 16:46:50 crc kubenswrapper[4954]: I1128 16:46:50.197278 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-msrxn" podStartSLOduration=2.638880372 podStartE2EDuration="5.197256645s" podCreationTimestamp="2025-11-28 16:46:45 +0000 UTC" firstStartedPulling="2025-11-28 16:46:47.14797565 +0000 UTC m=+2160.539644201" lastFinishedPulling="2025-11-28 16:46:49.706351933 +0000 UTC m=+2163.098020474" observedRunningTime="2025-11-28 16:46:50.196035446 +0000 UTC m=+2163.587703987" watchObservedRunningTime="2025-11-28 16:46:50.197256645 +0000 UTC m=+2163.588925186"
Nov 28 16:46:50 crc kubenswrapper[4954]: I1128 16:46:50.221377 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:52 crc kubenswrapper[4954]: I1128 16:46:52.249631 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7sx5c"]
Nov 28 16:46:52 crc kubenswrapper[4954]: I1128 16:46:52.250900 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7sx5c" podUID="a9a3be40-b420-4c69-a92e-f405733ac9c4" containerName="registry-server" containerID="cri-o://1b8c82a09740926ca852dc37255908148d06fb7d118416517ab642c36df08e35" gracePeriod=2
Nov 28 16:46:52 crc kubenswrapper[4954]: I1128 16:46:52.639203 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7sx5c"
Nov 28 16:46:52 crc kubenswrapper[4954]: I1128 16:46:52.721156 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9a3be40-b420-4c69-a92e-f405733ac9c4-catalog-content\") pod \"a9a3be40-b420-4c69-a92e-f405733ac9c4\" (UID: \"a9a3be40-b420-4c69-a92e-f405733ac9c4\") "
Nov 28 16:46:52 crc kubenswrapper[4954]: I1128 16:46:52.721221 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4m9hh\" (UniqueName: \"kubernetes.io/projected/a9a3be40-b420-4c69-a92e-f405733ac9c4-kube-api-access-4m9hh\") pod \"a9a3be40-b420-4c69-a92e-f405733ac9c4\" (UID: \"a9a3be40-b420-4c69-a92e-f405733ac9c4\") "
Nov 28 16:46:52 crc kubenswrapper[4954]: I1128 16:46:52.721299 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9a3be40-b420-4c69-a92e-f405733ac9c4-utilities\") pod \"a9a3be40-b420-4c69-a92e-f405733ac9c4\" (UID: \"a9a3be40-b420-4c69-a92e-f405733ac9c4\") "
Nov 28 16:46:52 crc kubenswrapper[4954]: I1128 16:46:52.722437 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9a3be40-b420-4c69-a92e-f405733ac9c4-utilities" (OuterVolumeSpecName: "utilities") pod "a9a3be40-b420-4c69-a92e-f405733ac9c4" (UID: "a9a3be40-b420-4c69-a92e-f405733ac9c4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:46:52 crc kubenswrapper[4954]: I1128 16:46:52.730078 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9a3be40-b420-4c69-a92e-f405733ac9c4-kube-api-access-4m9hh" (OuterVolumeSpecName: "kube-api-access-4m9hh") pod "a9a3be40-b420-4c69-a92e-f405733ac9c4" (UID: "a9a3be40-b420-4c69-a92e-f405733ac9c4"). InnerVolumeSpecName "kube-api-access-4m9hh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:46:52 crc kubenswrapper[4954]: I1128 16:46:52.816759 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9a3be40-b420-4c69-a92e-f405733ac9c4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a9a3be40-b420-4c69-a92e-f405733ac9c4" (UID: "a9a3be40-b420-4c69-a92e-f405733ac9c4"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:46:52 crc kubenswrapper[4954]: I1128 16:46:52.822417 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9a3be40-b420-4c69-a92e-f405733ac9c4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:46:52 crc kubenswrapper[4954]: I1128 16:46:52.822445 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4m9hh\" (UniqueName: \"kubernetes.io/projected/a9a3be40-b420-4c69-a92e-f405733ac9c4-kube-api-access-4m9hh\") on node \"crc\" DevicePath \"\"" Nov 28 16:46:52 crc kubenswrapper[4954]: I1128 16:46:52.822466 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9a3be40-b420-4c69-a92e-f405733ac9c4-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.206460 4954 generic.go:334] "Generic (PLEG): container finished" podID="a9a3be40-b420-4c69-a92e-f405733ac9c4" containerID="1b8c82a09740926ca852dc37255908148d06fb7d118416517ab642c36df08e35" exitCode=0 Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.206513 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7sx5c" event={"ID":"a9a3be40-b420-4c69-a92e-f405733ac9c4","Type":"ContainerDied","Data":"1b8c82a09740926ca852dc37255908148d06fb7d118416517ab642c36df08e35"} Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.206567 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7sx5c" event={"ID":"a9a3be40-b420-4c69-a92e-f405733ac9c4","Type":"ContainerDied","Data":"983094c7b5105ba8c683d715754ca52c39aa77e60dab1bb2228dbab81d1d1b1a"} Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.206590 4954 scope.go:117] "RemoveContainer" containerID="1b8c82a09740926ca852dc37255908148d06fb7d118416517ab642c36df08e35" Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.206750 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7sx5c" Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.240829 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7sx5c"] Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.242543 4954 scope.go:117] "RemoveContainer" containerID="4ecbf17bdc4183fd188af312b74ed988f005d858729f49cd7633b8b3f7c96184" Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.248360 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7sx5c"] Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.272807 4954 scope.go:117] "RemoveContainer" containerID="95de8eed699a15f73047e0ee4da95e5a6dc4389556dc62064f6a579dbf24082c" Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.290453 4954 scope.go:117] "RemoveContainer" containerID="1b8c82a09740926ca852dc37255908148d06fb7d118416517ab642c36df08e35" Nov 28 16:46:53 crc kubenswrapper[4954]: E1128 16:46:53.290926 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b8c82a09740926ca852dc37255908148d06fb7d118416517ab642c36df08e35\": container with ID starting with 1b8c82a09740926ca852dc37255908148d06fb7d118416517ab642c36df08e35 not found: ID does not exist" containerID="1b8c82a09740926ca852dc37255908148d06fb7d118416517ab642c36df08e35" Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.290970 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b8c82a09740926ca852dc37255908148d06fb7d118416517ab642c36df08e35"} err="failed to get container status \"1b8c82a09740926ca852dc37255908148d06fb7d118416517ab642c36df08e35\": rpc error: code = NotFound desc = could not find container \"1b8c82a09740926ca852dc37255908148d06fb7d118416517ab642c36df08e35\": container with ID starting with 1b8c82a09740926ca852dc37255908148d06fb7d118416517ab642c36df08e35 not found: ID does not exist" Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.291002 4954 scope.go:117] "RemoveContainer" containerID="4ecbf17bdc4183fd188af312b74ed988f005d858729f49cd7633b8b3f7c96184" Nov 28 16:46:53 crc kubenswrapper[4954]: E1128 16:46:53.291318 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ecbf17bdc4183fd188af312b74ed988f005d858729f49cd7633b8b3f7c96184\": container with ID starting with 4ecbf17bdc4183fd188af312b74ed988f005d858729f49cd7633b8b3f7c96184 not found: ID does not exist" containerID="4ecbf17bdc4183fd188af312b74ed988f005d858729f49cd7633b8b3f7c96184" Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.291353 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ecbf17bdc4183fd188af312b74ed988f005d858729f49cd7633b8b3f7c96184"} err="failed to get container status \"4ecbf17bdc4183fd188af312b74ed988f005d858729f49cd7633b8b3f7c96184\": rpc error: code = NotFound desc = could not find container \"4ecbf17bdc4183fd188af312b74ed988f005d858729f49cd7633b8b3f7c96184\": container with ID starting with 4ecbf17bdc4183fd188af312b74ed988f005d858729f49cd7633b8b3f7c96184 not found: ID does not exist" Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.291385 4954 scope.go:117] "RemoveContainer" containerID="95de8eed699a15f73047e0ee4da95e5a6dc4389556dc62064f6a579dbf24082c" Nov 28 16:46:53 crc kubenswrapper[4954]: E1128 16:46:53.291738 4954 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"95de8eed699a15f73047e0ee4da95e5a6dc4389556dc62064f6a579dbf24082c\": container with ID starting with 95de8eed699a15f73047e0ee4da95e5a6dc4389556dc62064f6a579dbf24082c not found: ID does not exist" containerID="95de8eed699a15f73047e0ee4da95e5a6dc4389556dc62064f6a579dbf24082c" Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.291782 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95de8eed699a15f73047e0ee4da95e5a6dc4389556dc62064f6a579dbf24082c"} err="failed to get container status \"95de8eed699a15f73047e0ee4da95e5a6dc4389556dc62064f6a579dbf24082c\": rpc error: code = NotFound desc = could not find container \"95de8eed699a15f73047e0ee4da95e5a6dc4389556dc62064f6a579dbf24082c\": container with ID starting with 95de8eed699a15f73047e0ee4da95e5a6dc4389556dc62064f6a579dbf24082c not found: ID does not exist" Nov 28 16:46:53 crc kubenswrapper[4954]: I1128 16:46:53.876655 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9a3be40-b420-4c69-a92e-f405733ac9c4" path="/var/lib/kubelet/pods/a9a3be40-b420-4c69-a92e-f405733ac9c4/volumes" Nov 28 16:46:56 crc kubenswrapper[4954]: I1128 16:46:56.202418 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-msrxn" Nov 28 16:46:56 crc kubenswrapper[4954]: I1128 16:46:56.202719 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-msrxn" Nov 28 16:46:56 crc kubenswrapper[4954]: I1128 16:46:56.243656 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-msrxn" Nov 28 16:46:56 crc kubenswrapper[4954]: I1128 16:46:56.284492 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-msrxn" Nov 28 16:46:57 crc kubenswrapper[4954]: I1128 16:46:57.444656 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-msrxn"] Nov 28 16:46:58 crc kubenswrapper[4954]: I1128 16:46:58.241131 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-msrxn" podUID="871003d1-4abc-4a18-aed7-f905c86ee10a" containerName="registry-server" containerID="cri-o://28f7fd2431d08a9bbedd003e6bc53e210f9731c24ebfa1337068edec4bfec229" gracePeriod=2 Nov 28 16:46:58 crc kubenswrapper[4954]: I1128 16:46:58.637919 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-msrxn" Nov 28 16:46:58 crc kubenswrapper[4954]: I1128 16:46:58.707560 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/871003d1-4abc-4a18-aed7-f905c86ee10a-utilities\") pod \"871003d1-4abc-4a18-aed7-f905c86ee10a\" (UID: \"871003d1-4abc-4a18-aed7-f905c86ee10a\") " Nov 28 16:46:58 crc kubenswrapper[4954]: I1128 16:46:58.707732 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/871003d1-4abc-4a18-aed7-f905c86ee10a-catalog-content\") pod \"871003d1-4abc-4a18-aed7-f905c86ee10a\" (UID: \"871003d1-4abc-4a18-aed7-f905c86ee10a\") " Nov 28 16:46:58 crc kubenswrapper[4954]: I1128 16:46:58.707853 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ssz2\" (UniqueName: \"kubernetes.io/projected/871003d1-4abc-4a18-aed7-f905c86ee10a-kube-api-access-5ssz2\") pod \"871003d1-4abc-4a18-aed7-f905c86ee10a\" (UID: \"871003d1-4abc-4a18-aed7-f905c86ee10a\") " Nov 28 16:46:58 crc kubenswrapper[4954]: I1128 16:46:58.709013 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/871003d1-4abc-4a18-aed7-f905c86ee10a-utilities" (OuterVolumeSpecName: "utilities") pod "871003d1-4abc-4a18-aed7-f905c86ee10a" (UID: "871003d1-4abc-4a18-aed7-f905c86ee10a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:46:58 crc kubenswrapper[4954]: I1128 16:46:58.715297 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/871003d1-4abc-4a18-aed7-f905c86ee10a-kube-api-access-5ssz2" (OuterVolumeSpecName: "kube-api-access-5ssz2") pod "871003d1-4abc-4a18-aed7-f905c86ee10a" (UID: "871003d1-4abc-4a18-aed7-f905c86ee10a"). InnerVolumeSpecName "kube-api-access-5ssz2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:46:58 crc kubenswrapper[4954]: I1128 16:46:58.768227 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/871003d1-4abc-4a18-aed7-f905c86ee10a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "871003d1-4abc-4a18-aed7-f905c86ee10a" (UID: "871003d1-4abc-4a18-aed7-f905c86ee10a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:46:58 crc kubenswrapper[4954]: I1128 16:46:58.809581 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/871003d1-4abc-4a18-aed7-f905c86ee10a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:46:58 crc kubenswrapper[4954]: I1128 16:46:58.809616 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ssz2\" (UniqueName: \"kubernetes.io/projected/871003d1-4abc-4a18-aed7-f905c86ee10a-kube-api-access-5ssz2\") on node \"crc\" DevicePath \"\"" Nov 28 16:46:58 crc kubenswrapper[4954]: I1128 16:46:58.809630 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/871003d1-4abc-4a18-aed7-f905c86ee10a-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.252384 4954 generic.go:334] "Generic (PLEG): container finished" podID="871003d1-4abc-4a18-aed7-f905c86ee10a" containerID="28f7fd2431d08a9bbedd003e6bc53e210f9731c24ebfa1337068edec4bfec229" exitCode=0 Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.252433 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-msrxn" event={"ID":"871003d1-4abc-4a18-aed7-f905c86ee10a","Type":"ContainerDied","Data":"28f7fd2431d08a9bbedd003e6bc53e210f9731c24ebfa1337068edec4bfec229"} Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.252453 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-msrxn" Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.252465 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-msrxn" event={"ID":"871003d1-4abc-4a18-aed7-f905c86ee10a","Type":"ContainerDied","Data":"01f23d44a590fd348b8b55a6ca98ba9730621eb8f9111704a42a8c1bd29c0628"} Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.252490 4954 scope.go:117] "RemoveContainer" containerID="28f7fd2431d08a9bbedd003e6bc53e210f9731c24ebfa1337068edec4bfec229" Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.289734 4954 scope.go:117] "RemoveContainer" containerID="80e1215225269ad69457e03715ea4416206e500803275ed1eb2d1ab1ec94ae43" Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.292243 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-msrxn"] Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.305329 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-msrxn"] Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.317169 4954 scope.go:117] "RemoveContainer" containerID="45c1363bd1a8efb3ec21c4817217f83fff42657678037b90ef8b38b5521081a3" Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.336957 4954 scope.go:117] "RemoveContainer" containerID="28f7fd2431d08a9bbedd003e6bc53e210f9731c24ebfa1337068edec4bfec229" Nov 28 16:46:59 crc kubenswrapper[4954]: E1128 16:46:59.337409 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28f7fd2431d08a9bbedd003e6bc53e210f9731c24ebfa1337068edec4bfec229\": container with ID starting with 28f7fd2431d08a9bbedd003e6bc53e210f9731c24ebfa1337068edec4bfec229 not found: ID does not exist" containerID="28f7fd2431d08a9bbedd003e6bc53e210f9731c24ebfa1337068edec4bfec229" Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.337460 
4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28f7fd2431d08a9bbedd003e6bc53e210f9731c24ebfa1337068edec4bfec229"} err="failed to get container status \"28f7fd2431d08a9bbedd003e6bc53e210f9731c24ebfa1337068edec4bfec229\": rpc error: code = NotFound desc = could not find container \"28f7fd2431d08a9bbedd003e6bc53e210f9731c24ebfa1337068edec4bfec229\": container with ID starting with 28f7fd2431d08a9bbedd003e6bc53e210f9731c24ebfa1337068edec4bfec229 not found: ID does not exist" Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.337491 4954 scope.go:117] "RemoveContainer" containerID="80e1215225269ad69457e03715ea4416206e500803275ed1eb2d1ab1ec94ae43" Nov 28 16:46:59 crc kubenswrapper[4954]: E1128 16:46:59.337941 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80e1215225269ad69457e03715ea4416206e500803275ed1eb2d1ab1ec94ae43\": container with ID starting with 80e1215225269ad69457e03715ea4416206e500803275ed1eb2d1ab1ec94ae43 not found: ID does not exist" containerID="80e1215225269ad69457e03715ea4416206e500803275ed1eb2d1ab1ec94ae43" Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.337979 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80e1215225269ad69457e03715ea4416206e500803275ed1eb2d1ab1ec94ae43"} err="failed to get container status \"80e1215225269ad69457e03715ea4416206e500803275ed1eb2d1ab1ec94ae43\": rpc error: code = NotFound desc = could not find container \"80e1215225269ad69457e03715ea4416206e500803275ed1eb2d1ab1ec94ae43\": container with ID starting with 80e1215225269ad69457e03715ea4416206e500803275ed1eb2d1ab1ec94ae43 not found: ID does not exist" Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.338002 4954 scope.go:117] "RemoveContainer" containerID="45c1363bd1a8efb3ec21c4817217f83fff42657678037b90ef8b38b5521081a3" Nov 28 16:46:59 crc kubenswrapper[4954]: E1128 16:46:59.338343 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45c1363bd1a8efb3ec21c4817217f83fff42657678037b90ef8b38b5521081a3\": container with ID starting with 45c1363bd1a8efb3ec21c4817217f83fff42657678037b90ef8b38b5521081a3 not found: ID does not exist" containerID="45c1363bd1a8efb3ec21c4817217f83fff42657678037b90ef8b38b5521081a3" Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.338379 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45c1363bd1a8efb3ec21c4817217f83fff42657678037b90ef8b38b5521081a3"} err="failed to get container status \"45c1363bd1a8efb3ec21c4817217f83fff42657678037b90ef8b38b5521081a3\": rpc error: code = NotFound desc = could not find container \"45c1363bd1a8efb3ec21c4817217f83fff42657678037b90ef8b38b5521081a3\": container with ID starting with 45c1363bd1a8efb3ec21c4817217f83fff42657678037b90ef8b38b5521081a3 not found: ID does not exist" Nov 28 16:46:59 crc kubenswrapper[4954]: I1128 16:46:59.868641 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="871003d1-4abc-4a18-aed7-f905c86ee10a" path="/var/lib/kubelet/pods/871003d1-4abc-4a18-aed7-f905c86ee10a/volumes" Nov 28 16:48:32 crc kubenswrapper[4954]: I1128 16:48:32.480791 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:48:32 crc kubenswrapper[4954]: I1128 16:48:32.481357 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:49:02 crc kubenswrapper[4954]: I1128 16:49:02.480327 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:49:02 crc kubenswrapper[4954]: I1128 16:49:02.481248 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:49:32 crc kubenswrapper[4954]: I1128 16:49:32.480989 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:49:32 crc kubenswrapper[4954]: I1128 16:49:32.481468 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:49:32 crc kubenswrapper[4954]: I1128 16:49:32.481518 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:49:32 crc kubenswrapper[4954]: I1128 16:49:32.482153 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:49:32 crc kubenswrapper[4954]: I1128 16:49:32.482214 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" gracePeriod=600 Nov 28 16:49:32 crc kubenswrapper[4954]: I1128 16:49:32.794062 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" exitCode=0 Nov 28 16:49:32 crc kubenswrapper[4954]: I1128 16:49:32.794112 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" 
event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba"} Nov 28 16:49:32 crc kubenswrapper[4954]: I1128 16:49:32.795088 4954 scope.go:117] "RemoveContainer" containerID="d50a8efc4b1b81210ca4bb44dd5e48836ad1dfd0a22eb35b09931efb376480fa" Nov 28 16:49:33 crc kubenswrapper[4954]: E1128 16:49:33.171958 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:49:33 crc kubenswrapper[4954]: I1128 16:49:33.812882 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:49:33 crc kubenswrapper[4954]: E1128 16:49:33.813234 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:49:46 crc kubenswrapper[4954]: I1128 16:49:46.855994 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:49:46 crc kubenswrapper[4954]: E1128 16:49:46.856831 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:49:59 crc kubenswrapper[4954]: I1128 16:49:59.856062 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:49:59 crc kubenswrapper[4954]: E1128 16:49:59.856805 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:50:14 crc kubenswrapper[4954]: I1128 16:50:14.857262 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:50:14 crc kubenswrapper[4954]: E1128 16:50:14.858141 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:50:27 crc kubenswrapper[4954]: 
I1128 16:50:27.863849 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:50:27 crc kubenswrapper[4954]: E1128 16:50:27.865060 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:50:41 crc kubenswrapper[4954]: I1128 16:50:41.855733 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:50:41 crc kubenswrapper[4954]: E1128 16:50:41.856558 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:50:55 crc kubenswrapper[4954]: I1128 16:50:55.856695 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:50:55 crc kubenswrapper[4954]: E1128 16:50:55.857727 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:51:10 crc kubenswrapper[4954]: I1128 16:51:10.856632 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:51:10 crc kubenswrapper[4954]: E1128 16:51:10.857708 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:51:22 crc kubenswrapper[4954]: I1128 16:51:22.856195 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:51:22 crc kubenswrapper[4954]: E1128 16:51:22.857002 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:51:36 crc kubenswrapper[4954]: I1128 16:51:36.856226 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:51:36 crc kubenswrapper[4954]: E1128 
16:51:36.857505 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:51:48 crc kubenswrapper[4954]: I1128 16:51:48.856601 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:51:48 crc kubenswrapper[4954]: E1128 16:51:48.857843 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:52:00 crc kubenswrapper[4954]: I1128 16:52:00.855637 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:52:00 crc kubenswrapper[4954]: E1128 16:52:00.856402 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:52:14 crc kubenswrapper[4954]: I1128 16:52:14.855893 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:52:14 crc kubenswrapper[4954]: E1128 16:52:14.856906 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:52:25 crc kubenswrapper[4954]: I1128 16:52:25.856113 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:52:25 crc kubenswrapper[4954]: E1128 16:52:25.857927 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:52:38 crc kubenswrapper[4954]: I1128 16:52:38.856653 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:52:38 crc kubenswrapper[4954]: E1128 16:52:38.857650 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:52:50 crc kubenswrapper[4954]: I1128 16:52:50.856770 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:52:50 crc kubenswrapper[4954]: E1128 16:52:50.857930 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:53:02 crc kubenswrapper[4954]: I1128 16:53:02.856206 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:53:02 crc kubenswrapper[4954]: E1128 16:53:02.857995 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:53:16 crc kubenswrapper[4954]: I1128 16:53:16.855869 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:53:16 crc kubenswrapper[4954]: E1128 16:53:16.856587 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:53:31 crc kubenswrapper[4954]: I1128 16:53:31.856976 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:53:31 crc kubenswrapper[4954]: E1128 16:53:31.857838 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:53:43 crc kubenswrapper[4954]: I1128 16:53:43.856146 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:53:43 crc kubenswrapper[4954]: E1128 16:53:43.856863 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:53:58 crc kubenswrapper[4954]: I1128 16:53:58.856850 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:53:58 crc kubenswrapper[4954]: E1128 16:53:58.857824 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:54:10 crc kubenswrapper[4954]: I1128 16:54:10.856630 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:54:10 crc kubenswrapper[4954]: E1128 16:54:10.857455 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:54:21 crc kubenswrapper[4954]: I1128 16:54:21.856082 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:54:21 crc kubenswrapper[4954]: E1128 16:54:21.856813 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 16:54:36 crc kubenswrapper[4954]: I1128 16:54:36.856630 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:54:37 crc kubenswrapper[4954]: I1128 16:54:37.358887 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"4310770654c95c53af0403e7697612204f526223706a7050b7dbdb48cdbd9ff2"} Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.731477 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8c7c7"] Nov 28 16:56:02 crc kubenswrapper[4954]: E1128 16:56:02.732842 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="871003d1-4abc-4a18-aed7-f905c86ee10a" containerName="registry-server" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.732874 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="871003d1-4abc-4a18-aed7-f905c86ee10a" containerName="registry-server" Nov 28 16:56:02 crc kubenswrapper[4954]: E1128 16:56:02.732911 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="871003d1-4abc-4a18-aed7-f905c86ee10a" containerName="extract-content" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.732928 4954 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="871003d1-4abc-4a18-aed7-f905c86ee10a" containerName="extract-content" Nov 28 16:56:02 crc kubenswrapper[4954]: E1128 16:56:02.732951 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9a3be40-b420-4c69-a92e-f405733ac9c4" containerName="registry-server" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.732968 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9a3be40-b420-4c69-a92e-f405733ac9c4" containerName="registry-server" Nov 28 16:56:02 crc kubenswrapper[4954]: E1128 16:56:02.732992 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="871003d1-4abc-4a18-aed7-f905c86ee10a" containerName="extract-utilities" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.733008 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="871003d1-4abc-4a18-aed7-f905c86ee10a" containerName="extract-utilities" Nov 28 16:56:02 crc kubenswrapper[4954]: E1128 16:56:02.733037 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9a3be40-b420-4c69-a92e-f405733ac9c4" containerName="extract-content" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.733054 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9a3be40-b420-4c69-a92e-f405733ac9c4" containerName="extract-content" Nov 28 16:56:02 crc kubenswrapper[4954]: E1128 16:56:02.733090 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9a3be40-b420-4c69-a92e-f405733ac9c4" containerName="extract-utilities" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.733107 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9a3be40-b420-4c69-a92e-f405733ac9c4" containerName="extract-utilities" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.733455 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="871003d1-4abc-4a18-aed7-f905c86ee10a" containerName="registry-server" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.733495 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9a3be40-b420-4c69-a92e-f405733ac9c4" containerName="registry-server" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.736122 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.750757 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8c7c7"] Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.833088 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvpxm\" (UniqueName: \"kubernetes.io/projected/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-kube-api-access-hvpxm\") pod \"redhat-marketplace-8c7c7\" (UID: \"1cdf74d1-fbac-4091-a2a1-c36b972d51e9\") " pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.833237 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-catalog-content\") pod \"redhat-marketplace-8c7c7\" (UID: \"1cdf74d1-fbac-4091-a2a1-c36b972d51e9\") " pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.833302 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-utilities\") pod \"redhat-marketplace-8c7c7\" (UID: \"1cdf74d1-fbac-4091-a2a1-c36b972d51e9\") " pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.934470 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-utilities\") pod \"redhat-marketplace-8c7c7\" (UID: \"1cdf74d1-fbac-4091-a2a1-c36b972d51e9\") " pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.934589 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvpxm\" (UniqueName: \"kubernetes.io/projected/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-kube-api-access-hvpxm\") pod \"redhat-marketplace-8c7c7\" (UID: \"1cdf74d1-fbac-4091-a2a1-c36b972d51e9\") " pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.935240 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-utilities\") pod \"redhat-marketplace-8c7c7\" (UID: \"1cdf74d1-fbac-4091-a2a1-c36b972d51e9\") " pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.935787 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-catalog-content\") pod \"redhat-marketplace-8c7c7\" (UID: \"1cdf74d1-fbac-4091-a2a1-c36b972d51e9\") " pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.936047 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-catalog-content\") pod \"redhat-marketplace-8c7c7\" (UID: \"1cdf74d1-fbac-4091-a2a1-c36b972d51e9\") " pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:02 crc kubenswrapper[4954]: I1128 16:56:02.959607 4954 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-hvpxm\" (UniqueName: \"kubernetes.io/projected/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-kube-api-access-hvpxm\") pod \"redhat-marketplace-8c7c7\" (UID: \"1cdf74d1-fbac-4091-a2a1-c36b972d51e9\") " pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:03 crc kubenswrapper[4954]: I1128 16:56:03.064920 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:03 crc kubenswrapper[4954]: I1128 16:56:03.508621 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8c7c7"] Nov 28 16:56:03 crc kubenswrapper[4954]: W1128 16:56:03.512270 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1cdf74d1_fbac_4091_a2a1_c36b972d51e9.slice/crio-699fdd791be5428e19792124294703182e3bba2461a49c446f7380c4b07f1e50 WatchSource:0}: Error finding container 699fdd791be5428e19792124294703182e3bba2461a49c446f7380c4b07f1e50: Status 404 returned error can't find the container with id 699fdd791be5428e19792124294703182e3bba2461a49c446f7380c4b07f1e50 Nov 28 16:56:04 crc kubenswrapper[4954]: I1128 16:56:04.072175 4954 generic.go:334] "Generic (PLEG): container finished" podID="1cdf74d1-fbac-4091-a2a1-c36b972d51e9" containerID="e2cfcfab1dc62abe5108d5d65001a803ed07b0bdac6b557519427e786a9c6340" exitCode=0 Nov 28 16:56:04 crc kubenswrapper[4954]: I1128 16:56:04.072222 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8c7c7" event={"ID":"1cdf74d1-fbac-4091-a2a1-c36b972d51e9","Type":"ContainerDied","Data":"e2cfcfab1dc62abe5108d5d65001a803ed07b0bdac6b557519427e786a9c6340"} Nov 28 16:56:04 crc kubenswrapper[4954]: I1128 16:56:04.072252 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8c7c7" event={"ID":"1cdf74d1-fbac-4091-a2a1-c36b972d51e9","Type":"ContainerStarted","Data":"699fdd791be5428e19792124294703182e3bba2461a49c446f7380c4b07f1e50"} Nov 28 16:56:04 crc kubenswrapper[4954]: I1128 16:56:04.074166 4954 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:56:05 crc kubenswrapper[4954]: I1128 16:56:05.081448 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8c7c7" event={"ID":"1cdf74d1-fbac-4091-a2a1-c36b972d51e9","Type":"ContainerStarted","Data":"da8a888dfd1e066725ae92ff9675658cecc03c4529bc1cf6ebfc730f8e71be6b"} Nov 28 16:56:06 crc kubenswrapper[4954]: I1128 16:56:06.092661 4954 generic.go:334] "Generic (PLEG): container finished" podID="1cdf74d1-fbac-4091-a2a1-c36b972d51e9" containerID="da8a888dfd1e066725ae92ff9675658cecc03c4529bc1cf6ebfc730f8e71be6b" exitCode=0 Nov 28 16:56:06 crc kubenswrapper[4954]: I1128 16:56:06.092730 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8c7c7" event={"ID":"1cdf74d1-fbac-4091-a2a1-c36b972d51e9","Type":"ContainerDied","Data":"da8a888dfd1e066725ae92ff9675658cecc03c4529bc1cf6ebfc730f8e71be6b"} Nov 28 16:56:07 crc kubenswrapper[4954]: I1128 16:56:07.107605 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8c7c7" event={"ID":"1cdf74d1-fbac-4091-a2a1-c36b972d51e9","Type":"ContainerStarted","Data":"21aadf664d290b87b2ddca840583a1293d8900973507e9cde0be9de85b9513f0"} Nov 28 16:56:07 crc kubenswrapper[4954]: I1128 16:56:07.139045 4954 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8c7c7" podStartSLOduration=2.56357926 podStartE2EDuration="5.139016555s" podCreationTimestamp="2025-11-28 16:56:02 +0000 UTC" firstStartedPulling="2025-11-28 16:56:04.073976759 +0000 UTC m=+2717.465645300" lastFinishedPulling="2025-11-28 16:56:06.649414024 +0000 UTC m=+2720.041082595" observedRunningTime="2025-11-28 16:56:07.128126762 +0000 UTC m=+2720.519795303" watchObservedRunningTime="2025-11-28 16:56:07.139016555 +0000 UTC m=+2720.530685136" Nov 28 16:56:07 crc kubenswrapper[4954]: I1128 16:56:07.502193 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2m24q"] Nov 28 16:56:07 crc kubenswrapper[4954]: I1128 16:56:07.503927 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:07 crc kubenswrapper[4954]: I1128 16:56:07.525459 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2m24q"] Nov 28 16:56:07 crc kubenswrapper[4954]: I1128 16:56:07.602343 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cf8dl\" (UniqueName: \"kubernetes.io/projected/9aefc690-47ee-419d-9126-dfe98bc3d6f5-kube-api-access-cf8dl\") pod \"redhat-operators-2m24q\" (UID: \"9aefc690-47ee-419d-9126-dfe98bc3d6f5\") " pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:07 crc kubenswrapper[4954]: I1128 16:56:07.602573 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9aefc690-47ee-419d-9126-dfe98bc3d6f5-catalog-content\") pod \"redhat-operators-2m24q\" (UID: \"9aefc690-47ee-419d-9126-dfe98bc3d6f5\") " pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:07 crc kubenswrapper[4954]: I1128 16:56:07.602725 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9aefc690-47ee-419d-9126-dfe98bc3d6f5-utilities\") pod \"redhat-operators-2m24q\" (UID: \"9aefc690-47ee-419d-9126-dfe98bc3d6f5\") " pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:07 crc kubenswrapper[4954]: I1128 16:56:07.704054 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9aefc690-47ee-419d-9126-dfe98bc3d6f5-catalog-content\") pod \"redhat-operators-2m24q\" (UID: \"9aefc690-47ee-419d-9126-dfe98bc3d6f5\") " pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:07 crc kubenswrapper[4954]: I1128 16:56:07.704586 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9aefc690-47ee-419d-9126-dfe98bc3d6f5-catalog-content\") pod \"redhat-operators-2m24q\" (UID: \"9aefc690-47ee-419d-9126-dfe98bc3d6f5\") " pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:07 crc kubenswrapper[4954]: I1128 16:56:07.704595 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9aefc690-47ee-419d-9126-dfe98bc3d6f5-utilities\") pod \"redhat-operators-2m24q\" (UID: \"9aefc690-47ee-419d-9126-dfe98bc3d6f5\") " pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:07 crc kubenswrapper[4954]: I1128 16:56:07.704816 4954 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cf8dl\" (UniqueName: \"kubernetes.io/projected/9aefc690-47ee-419d-9126-dfe98bc3d6f5-kube-api-access-cf8dl\") pod \"redhat-operators-2m24q\" (UID: \"9aefc690-47ee-419d-9126-dfe98bc3d6f5\") " pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:07 crc kubenswrapper[4954]: I1128 16:56:07.705096 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9aefc690-47ee-419d-9126-dfe98bc3d6f5-utilities\") pod \"redhat-operators-2m24q\" (UID: \"9aefc690-47ee-419d-9126-dfe98bc3d6f5\") " pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:07 crc kubenswrapper[4954]: I1128 16:56:07.725497 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cf8dl\" (UniqueName: \"kubernetes.io/projected/9aefc690-47ee-419d-9126-dfe98bc3d6f5-kube-api-access-cf8dl\") pod \"redhat-operators-2m24q\" (UID: \"9aefc690-47ee-419d-9126-dfe98bc3d6f5\") " pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:07 crc kubenswrapper[4954]: I1128 16:56:07.833822 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:08 crc kubenswrapper[4954]: I1128 16:56:08.305589 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2m24q"] Nov 28 16:56:09 crc kubenswrapper[4954]: I1128 16:56:09.124470 4954 generic.go:334] "Generic (PLEG): container finished" podID="9aefc690-47ee-419d-9126-dfe98bc3d6f5" containerID="4e1654219a84ea65678b1f407346cfaf87d3038e90ae62afd4c1f866e4035004" exitCode=0 Nov 28 16:56:09 crc kubenswrapper[4954]: I1128 16:56:09.124615 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2m24q" event={"ID":"9aefc690-47ee-419d-9126-dfe98bc3d6f5","Type":"ContainerDied","Data":"4e1654219a84ea65678b1f407346cfaf87d3038e90ae62afd4c1f866e4035004"} Nov 28 16:56:09 crc kubenswrapper[4954]: I1128 16:56:09.124889 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2m24q" event={"ID":"9aefc690-47ee-419d-9126-dfe98bc3d6f5","Type":"ContainerStarted","Data":"a64b4a583080543032d3f47abd8b96f2005fe0aae3e1b5954e640672b5c7ee0a"} Nov 28 16:56:11 crc kubenswrapper[4954]: I1128 16:56:11.149576 4954 generic.go:334] "Generic (PLEG): container finished" podID="9aefc690-47ee-419d-9126-dfe98bc3d6f5" containerID="4b085fefe9a21d385c0b0cba9231716da9f9e14a440c1b083644cf70a45584cd" exitCode=0 Nov 28 16:56:11 crc kubenswrapper[4954]: I1128 16:56:11.149664 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2m24q" event={"ID":"9aefc690-47ee-419d-9126-dfe98bc3d6f5","Type":"ContainerDied","Data":"4b085fefe9a21d385c0b0cba9231716da9f9e14a440c1b083644cf70a45584cd"} Nov 28 16:56:12 crc kubenswrapper[4954]: I1128 16:56:12.160176 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2m24q" event={"ID":"9aefc690-47ee-419d-9126-dfe98bc3d6f5","Type":"ContainerStarted","Data":"b22c3f82dae6e6aed94f21a6640f5fc591e3e3a8d66eba8b3fa038a736118a2b"} Nov 28 16:56:12 crc kubenswrapper[4954]: I1128 16:56:12.181284 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2m24q" podStartSLOduration=2.6945894839999998 podStartE2EDuration="5.18126632s" podCreationTimestamp="2025-11-28 16:56:07 
+0000 UTC" firstStartedPulling="2025-11-28 16:56:09.126667013 +0000 UTC m=+2722.518335554" lastFinishedPulling="2025-11-28 16:56:11.613343809 +0000 UTC m=+2725.005012390" observedRunningTime="2025-11-28 16:56:12.177850562 +0000 UTC m=+2725.569519103" watchObservedRunningTime="2025-11-28 16:56:12.18126632 +0000 UTC m=+2725.572934861" Nov 28 16:56:13 crc kubenswrapper[4954]: I1128 16:56:13.065286 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:13 crc kubenswrapper[4954]: I1128 16:56:13.065365 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:13 crc kubenswrapper[4954]: I1128 16:56:13.128977 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:13 crc kubenswrapper[4954]: I1128 16:56:13.223818 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:14 crc kubenswrapper[4954]: I1128 16:56:14.289116 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8c7c7"] Nov 28 16:56:15 crc kubenswrapper[4954]: I1128 16:56:15.188887 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8c7c7" podUID="1cdf74d1-fbac-4091-a2a1-c36b972d51e9" containerName="registry-server" containerID="cri-o://21aadf664d290b87b2ddca840583a1293d8900973507e9cde0be9de85b9513f0" gracePeriod=2 Nov 28 16:56:15 crc kubenswrapper[4954]: I1128 16:56:15.636618 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:15 crc kubenswrapper[4954]: I1128 16:56:15.773464 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-utilities\") pod \"1cdf74d1-fbac-4091-a2a1-c36b972d51e9\" (UID: \"1cdf74d1-fbac-4091-a2a1-c36b972d51e9\") " Nov 28 16:56:15 crc kubenswrapper[4954]: I1128 16:56:15.773666 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-catalog-content\") pod \"1cdf74d1-fbac-4091-a2a1-c36b972d51e9\" (UID: \"1cdf74d1-fbac-4091-a2a1-c36b972d51e9\") " Nov 28 16:56:15 crc kubenswrapper[4954]: I1128 16:56:15.773800 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hvpxm\" (UniqueName: \"kubernetes.io/projected/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-kube-api-access-hvpxm\") pod \"1cdf74d1-fbac-4091-a2a1-c36b972d51e9\" (UID: \"1cdf74d1-fbac-4091-a2a1-c36b972d51e9\") " Nov 28 16:56:15 crc kubenswrapper[4954]: I1128 16:56:15.774671 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-utilities" (OuterVolumeSpecName: "utilities") pod "1cdf74d1-fbac-4091-a2a1-c36b972d51e9" (UID: "1cdf74d1-fbac-4091-a2a1-c36b972d51e9"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:56:15 crc kubenswrapper[4954]: I1128 16:56:15.782785 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-kube-api-access-hvpxm" (OuterVolumeSpecName: "kube-api-access-hvpxm") pod "1cdf74d1-fbac-4091-a2a1-c36b972d51e9" (UID: "1cdf74d1-fbac-4091-a2a1-c36b972d51e9"). InnerVolumeSpecName "kube-api-access-hvpxm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:15 crc kubenswrapper[4954]: I1128 16:56:15.876487 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:15 crc kubenswrapper[4954]: I1128 16:56:15.876578 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hvpxm\" (UniqueName: \"kubernetes.io/projected/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-kube-api-access-hvpxm\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:16 crc kubenswrapper[4954]: I1128 16:56:16.199108 4954 generic.go:334] "Generic (PLEG): container finished" podID="1cdf74d1-fbac-4091-a2a1-c36b972d51e9" containerID="21aadf664d290b87b2ddca840583a1293d8900973507e9cde0be9de85b9513f0" exitCode=0 Nov 28 16:56:16 crc kubenswrapper[4954]: I1128 16:56:16.199186 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8c7c7" event={"ID":"1cdf74d1-fbac-4091-a2a1-c36b972d51e9","Type":"ContainerDied","Data":"21aadf664d290b87b2ddca840583a1293d8900973507e9cde0be9de85b9513f0"} Nov 28 16:56:16 crc kubenswrapper[4954]: I1128 16:56:16.199542 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8c7c7" event={"ID":"1cdf74d1-fbac-4091-a2a1-c36b972d51e9","Type":"ContainerDied","Data":"699fdd791be5428e19792124294703182e3bba2461a49c446f7380c4b07f1e50"} Nov 28 16:56:16 crc kubenswrapper[4954]: I1128 16:56:16.199223 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8c7c7" Nov 28 16:56:16 crc kubenswrapper[4954]: I1128 16:56:16.199576 4954 scope.go:117] "RemoveContainer" containerID="21aadf664d290b87b2ddca840583a1293d8900973507e9cde0be9de85b9513f0" Nov 28 16:56:16 crc kubenswrapper[4954]: I1128 16:56:16.230281 4954 scope.go:117] "RemoveContainer" containerID="da8a888dfd1e066725ae92ff9675658cecc03c4529bc1cf6ebfc730f8e71be6b" Nov 28 16:56:16 crc kubenswrapper[4954]: I1128 16:56:16.260470 4954 scope.go:117] "RemoveContainer" containerID="e2cfcfab1dc62abe5108d5d65001a803ed07b0bdac6b557519427e786a9c6340" Nov 28 16:56:16 crc kubenswrapper[4954]: I1128 16:56:16.288508 4954 scope.go:117] "RemoveContainer" containerID="21aadf664d290b87b2ddca840583a1293d8900973507e9cde0be9de85b9513f0" Nov 28 16:56:16 crc kubenswrapper[4954]: E1128 16:56:16.289009 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21aadf664d290b87b2ddca840583a1293d8900973507e9cde0be9de85b9513f0\": container with ID starting with 21aadf664d290b87b2ddca840583a1293d8900973507e9cde0be9de85b9513f0 not found: ID does not exist" containerID="21aadf664d290b87b2ddca840583a1293d8900973507e9cde0be9de85b9513f0" Nov 28 16:56:16 crc kubenswrapper[4954]: I1128 16:56:16.289059 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21aadf664d290b87b2ddca840583a1293d8900973507e9cde0be9de85b9513f0"} err="failed to get container status \"21aadf664d290b87b2ddca840583a1293d8900973507e9cde0be9de85b9513f0\": rpc error: code = NotFound desc = could not find container \"21aadf664d290b87b2ddca840583a1293d8900973507e9cde0be9de85b9513f0\": container with ID starting with 21aadf664d290b87b2ddca840583a1293d8900973507e9cde0be9de85b9513f0 not found: ID does not exist" Nov 28 16:56:16 crc kubenswrapper[4954]: I1128 16:56:16.289087 4954 scope.go:117] "RemoveContainer" containerID="da8a888dfd1e066725ae92ff9675658cecc03c4529bc1cf6ebfc730f8e71be6b" Nov 28 16:56:16 crc kubenswrapper[4954]: E1128 16:56:16.289437 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da8a888dfd1e066725ae92ff9675658cecc03c4529bc1cf6ebfc730f8e71be6b\": container with ID starting with da8a888dfd1e066725ae92ff9675658cecc03c4529bc1cf6ebfc730f8e71be6b not found: ID does not exist" containerID="da8a888dfd1e066725ae92ff9675658cecc03c4529bc1cf6ebfc730f8e71be6b" Nov 28 16:56:16 crc kubenswrapper[4954]: I1128 16:56:16.289517 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da8a888dfd1e066725ae92ff9675658cecc03c4529bc1cf6ebfc730f8e71be6b"} err="failed to get container status \"da8a888dfd1e066725ae92ff9675658cecc03c4529bc1cf6ebfc730f8e71be6b\": rpc error: code = NotFound desc = could not find container \"da8a888dfd1e066725ae92ff9675658cecc03c4529bc1cf6ebfc730f8e71be6b\": container with ID starting with da8a888dfd1e066725ae92ff9675658cecc03c4529bc1cf6ebfc730f8e71be6b not found: ID does not exist" Nov 28 16:56:16 crc kubenswrapper[4954]: I1128 16:56:16.289597 4954 scope.go:117] "RemoveContainer" containerID="e2cfcfab1dc62abe5108d5d65001a803ed07b0bdac6b557519427e786a9c6340" Nov 28 16:56:16 crc kubenswrapper[4954]: E1128 16:56:16.290005 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2cfcfab1dc62abe5108d5d65001a803ed07b0bdac6b557519427e786a9c6340\": container with ID starting 
with e2cfcfab1dc62abe5108d5d65001a803ed07b0bdac6b557519427e786a9c6340 not found: ID does not exist" containerID="e2cfcfab1dc62abe5108d5d65001a803ed07b0bdac6b557519427e786a9c6340" Nov 28 16:56:16 crc kubenswrapper[4954]: I1128 16:56:16.290040 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2cfcfab1dc62abe5108d5d65001a803ed07b0bdac6b557519427e786a9c6340"} err="failed to get container status \"e2cfcfab1dc62abe5108d5d65001a803ed07b0bdac6b557519427e786a9c6340\": rpc error: code = NotFound desc = could not find container \"e2cfcfab1dc62abe5108d5d65001a803ed07b0bdac6b557519427e786a9c6340\": container with ID starting with e2cfcfab1dc62abe5108d5d65001a803ed07b0bdac6b557519427e786a9c6340 not found: ID does not exist" Nov 28 16:56:17 crc kubenswrapper[4954]: I1128 16:56:17.834619 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:17 crc kubenswrapper[4954]: I1128 16:56:17.834698 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:17 crc kubenswrapper[4954]: I1128 16:56:17.877983 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1cdf74d1-fbac-4091-a2a1-c36b972d51e9" (UID: "1cdf74d1-fbac-4091-a2a1-c36b972d51e9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:56:17 crc kubenswrapper[4954]: I1128 16:56:17.887845 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:17 crc kubenswrapper[4954]: I1128 16:56:17.902515 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cdf74d1-fbac-4091-a2a1-c36b972d51e9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:18 crc kubenswrapper[4954]: I1128 16:56:18.031293 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8c7c7"] Nov 28 16:56:18 crc kubenswrapper[4954]: I1128 16:56:18.036176 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8c7c7"] Nov 28 16:56:18 crc kubenswrapper[4954]: I1128 16:56:18.286549 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:19 crc kubenswrapper[4954]: I1128 16:56:19.307755 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2m24q"] Nov 28 16:56:19 crc kubenswrapper[4954]: I1128 16:56:19.872990 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1cdf74d1-fbac-4091-a2a1-c36b972d51e9" path="/var/lib/kubelet/pods/1cdf74d1-fbac-4091-a2a1-c36b972d51e9/volumes" Nov 28 16:56:20 crc kubenswrapper[4954]: I1128 16:56:20.230288 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2m24q" podUID="9aefc690-47ee-419d-9126-dfe98bc3d6f5" containerName="registry-server" containerID="cri-o://b22c3f82dae6e6aed94f21a6640f5fc591e3e3a8d66eba8b3fa038a736118a2b" gracePeriod=2 Nov 28 16:56:21 crc kubenswrapper[4954]: I1128 16:56:21.824023 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:21 crc kubenswrapper[4954]: I1128 16:56:21.959185 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9aefc690-47ee-419d-9126-dfe98bc3d6f5-utilities\") pod \"9aefc690-47ee-419d-9126-dfe98bc3d6f5\" (UID: \"9aefc690-47ee-419d-9126-dfe98bc3d6f5\") " Nov 28 16:56:21 crc kubenswrapper[4954]: I1128 16:56:21.959244 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9aefc690-47ee-419d-9126-dfe98bc3d6f5-catalog-content\") pod \"9aefc690-47ee-419d-9126-dfe98bc3d6f5\" (UID: \"9aefc690-47ee-419d-9126-dfe98bc3d6f5\") " Nov 28 16:56:21 crc kubenswrapper[4954]: I1128 16:56:21.959378 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cf8dl\" (UniqueName: \"kubernetes.io/projected/9aefc690-47ee-419d-9126-dfe98bc3d6f5-kube-api-access-cf8dl\") pod \"9aefc690-47ee-419d-9126-dfe98bc3d6f5\" (UID: \"9aefc690-47ee-419d-9126-dfe98bc3d6f5\") " Nov 28 16:56:21 crc kubenswrapper[4954]: I1128 16:56:21.960849 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9aefc690-47ee-419d-9126-dfe98bc3d6f5-utilities" (OuterVolumeSpecName: "utilities") pod "9aefc690-47ee-419d-9126-dfe98bc3d6f5" (UID: "9aefc690-47ee-419d-9126-dfe98bc3d6f5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:56:21 crc kubenswrapper[4954]: I1128 16:56:21.965719 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9aefc690-47ee-419d-9126-dfe98bc3d6f5-kube-api-access-cf8dl" (OuterVolumeSpecName: "kube-api-access-cf8dl") pod "9aefc690-47ee-419d-9126-dfe98bc3d6f5" (UID: "9aefc690-47ee-419d-9126-dfe98bc3d6f5"). InnerVolumeSpecName "kube-api-access-cf8dl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.061024 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cf8dl\" (UniqueName: \"kubernetes.io/projected/9aefc690-47ee-419d-9126-dfe98bc3d6f5-kube-api-access-cf8dl\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.061059 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9aefc690-47ee-419d-9126-dfe98bc3d6f5-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.092642 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9aefc690-47ee-419d-9126-dfe98bc3d6f5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9aefc690-47ee-419d-9126-dfe98bc3d6f5" (UID: "9aefc690-47ee-419d-9126-dfe98bc3d6f5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.162946 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9aefc690-47ee-419d-9126-dfe98bc3d6f5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.249864 4954 generic.go:334] "Generic (PLEG): container finished" podID="9aefc690-47ee-419d-9126-dfe98bc3d6f5" containerID="b22c3f82dae6e6aed94f21a6640f5fc591e3e3a8d66eba8b3fa038a736118a2b" exitCode=0 Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.249943 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2m24q" event={"ID":"9aefc690-47ee-419d-9126-dfe98bc3d6f5","Type":"ContainerDied","Data":"b22c3f82dae6e6aed94f21a6640f5fc591e3e3a8d66eba8b3fa038a736118a2b"} Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.250004 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2m24q" event={"ID":"9aefc690-47ee-419d-9126-dfe98bc3d6f5","Type":"ContainerDied","Data":"a64b4a583080543032d3f47abd8b96f2005fe0aae3e1b5954e640672b5c7ee0a"} Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.250040 4954 scope.go:117] "RemoveContainer" containerID="b22c3f82dae6e6aed94f21a6640f5fc591e3e3a8d66eba8b3fa038a736118a2b" Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.250065 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2m24q" Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.269682 4954 scope.go:117] "RemoveContainer" containerID="4b085fefe9a21d385c0b0cba9231716da9f9e14a440c1b083644cf70a45584cd" Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.294028 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2m24q"] Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.295743 4954 scope.go:117] "RemoveContainer" containerID="4e1654219a84ea65678b1f407346cfaf87d3038e90ae62afd4c1f866e4035004" Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.321680 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2m24q"] Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.339001 4954 scope.go:117] "RemoveContainer" containerID="b22c3f82dae6e6aed94f21a6640f5fc591e3e3a8d66eba8b3fa038a736118a2b" Nov 28 16:56:22 crc kubenswrapper[4954]: E1128 16:56:22.339452 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b22c3f82dae6e6aed94f21a6640f5fc591e3e3a8d66eba8b3fa038a736118a2b\": container with ID starting with b22c3f82dae6e6aed94f21a6640f5fc591e3e3a8d66eba8b3fa038a736118a2b not found: ID does not exist" containerID="b22c3f82dae6e6aed94f21a6640f5fc591e3e3a8d66eba8b3fa038a736118a2b" Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.339491 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b22c3f82dae6e6aed94f21a6640f5fc591e3e3a8d66eba8b3fa038a736118a2b"} err="failed to get container status \"b22c3f82dae6e6aed94f21a6640f5fc591e3e3a8d66eba8b3fa038a736118a2b\": rpc error: code = NotFound desc = could not find container \"b22c3f82dae6e6aed94f21a6640f5fc591e3e3a8d66eba8b3fa038a736118a2b\": container with ID starting with b22c3f82dae6e6aed94f21a6640f5fc591e3e3a8d66eba8b3fa038a736118a2b not found: ID does not exist" Nov 28 16:56:22 crc 
kubenswrapper[4954]: I1128 16:56:22.339520 4954 scope.go:117] "RemoveContainer" containerID="4b085fefe9a21d385c0b0cba9231716da9f9e14a440c1b083644cf70a45584cd" Nov 28 16:56:22 crc kubenswrapper[4954]: E1128 16:56:22.339838 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b085fefe9a21d385c0b0cba9231716da9f9e14a440c1b083644cf70a45584cd\": container with ID starting with 4b085fefe9a21d385c0b0cba9231716da9f9e14a440c1b083644cf70a45584cd not found: ID does not exist" containerID="4b085fefe9a21d385c0b0cba9231716da9f9e14a440c1b083644cf70a45584cd" Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.339886 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b085fefe9a21d385c0b0cba9231716da9f9e14a440c1b083644cf70a45584cd"} err="failed to get container status \"4b085fefe9a21d385c0b0cba9231716da9f9e14a440c1b083644cf70a45584cd\": rpc error: code = NotFound desc = could not find container \"4b085fefe9a21d385c0b0cba9231716da9f9e14a440c1b083644cf70a45584cd\": container with ID starting with 4b085fefe9a21d385c0b0cba9231716da9f9e14a440c1b083644cf70a45584cd not found: ID does not exist" Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.339921 4954 scope.go:117] "RemoveContainer" containerID="4e1654219a84ea65678b1f407346cfaf87d3038e90ae62afd4c1f866e4035004" Nov 28 16:56:22 crc kubenswrapper[4954]: E1128 16:56:22.340260 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e1654219a84ea65678b1f407346cfaf87d3038e90ae62afd4c1f866e4035004\": container with ID starting with 4e1654219a84ea65678b1f407346cfaf87d3038e90ae62afd4c1f866e4035004 not found: ID does not exist" containerID="4e1654219a84ea65678b1f407346cfaf87d3038e90ae62afd4c1f866e4035004" Nov 28 16:56:22 crc kubenswrapper[4954]: I1128 16:56:22.340293 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e1654219a84ea65678b1f407346cfaf87d3038e90ae62afd4c1f866e4035004"} err="failed to get container status \"4e1654219a84ea65678b1f407346cfaf87d3038e90ae62afd4c1f866e4035004\": rpc error: code = NotFound desc = could not find container \"4e1654219a84ea65678b1f407346cfaf87d3038e90ae62afd4c1f866e4035004\": container with ID starting with 4e1654219a84ea65678b1f407346cfaf87d3038e90ae62afd4c1f866e4035004 not found: ID does not exist" Nov 28 16:56:23 crc kubenswrapper[4954]: I1128 16:56:23.864843 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9aefc690-47ee-419d-9126-dfe98bc3d6f5" path="/var/lib/kubelet/pods/9aefc690-47ee-419d-9126-dfe98bc3d6f5/volumes" Nov 28 16:57:02 crc kubenswrapper[4954]: I1128 16:57:02.480675 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:57:02 crc kubenswrapper[4954]: I1128 16:57:02.482320 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:57:32 crc kubenswrapper[4954]: I1128 16:57:32.481874 4954 patch_prober.go:28] interesting 
pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:57:32 crc kubenswrapper[4954]: I1128 16:57:32.482398 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:57:48 crc kubenswrapper[4954]: I1128 16:57:48.974824 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bk22q"] Nov 28 16:57:48 crc kubenswrapper[4954]: E1128 16:57:48.975657 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9aefc690-47ee-419d-9126-dfe98bc3d6f5" containerName="registry-server" Nov 28 16:57:48 crc kubenswrapper[4954]: I1128 16:57:48.975671 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="9aefc690-47ee-419d-9126-dfe98bc3d6f5" containerName="registry-server" Nov 28 16:57:48 crc kubenswrapper[4954]: E1128 16:57:48.975754 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cdf74d1-fbac-4091-a2a1-c36b972d51e9" containerName="registry-server" Nov 28 16:57:48 crc kubenswrapper[4954]: I1128 16:57:48.975760 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cdf74d1-fbac-4091-a2a1-c36b972d51e9" containerName="registry-server" Nov 28 16:57:48 crc kubenswrapper[4954]: E1128 16:57:48.975773 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cdf74d1-fbac-4091-a2a1-c36b972d51e9" containerName="extract-content" Nov 28 16:57:48 crc kubenswrapper[4954]: I1128 16:57:48.975779 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cdf74d1-fbac-4091-a2a1-c36b972d51e9" containerName="extract-content" Nov 28 16:57:48 crc kubenswrapper[4954]: E1128 16:57:48.975791 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cdf74d1-fbac-4091-a2a1-c36b972d51e9" containerName="extract-utilities" Nov 28 16:57:48 crc kubenswrapper[4954]: I1128 16:57:48.975798 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cdf74d1-fbac-4091-a2a1-c36b972d51e9" containerName="extract-utilities" Nov 28 16:57:48 crc kubenswrapper[4954]: E1128 16:57:48.975811 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9aefc690-47ee-419d-9126-dfe98bc3d6f5" containerName="extract-content" Nov 28 16:57:48 crc kubenswrapper[4954]: I1128 16:57:48.975816 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="9aefc690-47ee-419d-9126-dfe98bc3d6f5" containerName="extract-content" Nov 28 16:57:48 crc kubenswrapper[4954]: E1128 16:57:48.975828 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9aefc690-47ee-419d-9126-dfe98bc3d6f5" containerName="extract-utilities" Nov 28 16:57:48 crc kubenswrapper[4954]: I1128 16:57:48.975834 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="9aefc690-47ee-419d-9126-dfe98bc3d6f5" containerName="extract-utilities" Nov 28 16:57:48 crc kubenswrapper[4954]: I1128 16:57:48.975962 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cdf74d1-fbac-4091-a2a1-c36b972d51e9" containerName="registry-server" Nov 28 16:57:48 crc kubenswrapper[4954]: I1128 16:57:48.975979 4954 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="9aefc690-47ee-419d-9126-dfe98bc3d6f5" containerName="registry-server" Nov 28 16:57:48 crc kubenswrapper[4954]: I1128 16:57:48.976959 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:57:49 crc kubenswrapper[4954]: I1128 16:57:49.011219 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bk22q"] Nov 28 16:57:49 crc kubenswrapper[4954]: I1128 16:57:49.019751 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zq2gr\" (UniqueName: \"kubernetes.io/projected/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-kube-api-access-zq2gr\") pod \"community-operators-bk22q\" (UID: \"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c\") " pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:57:49 crc kubenswrapper[4954]: I1128 16:57:49.020084 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-catalog-content\") pod \"community-operators-bk22q\" (UID: \"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c\") " pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:57:49 crc kubenswrapper[4954]: I1128 16:57:49.020202 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-utilities\") pod \"community-operators-bk22q\" (UID: \"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c\") " pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:57:49 crc kubenswrapper[4954]: I1128 16:57:49.121313 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-catalog-content\") pod \"community-operators-bk22q\" (UID: \"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c\") " pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:57:49 crc kubenswrapper[4954]: I1128 16:57:49.121376 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-utilities\") pod \"community-operators-bk22q\" (UID: \"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c\") " pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:57:49 crc kubenswrapper[4954]: I1128 16:57:49.121414 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zq2gr\" (UniqueName: \"kubernetes.io/projected/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-kube-api-access-zq2gr\") pod \"community-operators-bk22q\" (UID: \"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c\") " pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:57:49 crc kubenswrapper[4954]: I1128 16:57:49.122322 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-catalog-content\") pod \"community-operators-bk22q\" (UID: \"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c\") " pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:57:49 crc kubenswrapper[4954]: I1128 16:57:49.122420 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-utilities\") pod \"community-operators-bk22q\" (UID: 
\"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c\") " pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:57:49 crc kubenswrapper[4954]: I1128 16:57:49.143102 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zq2gr\" (UniqueName: \"kubernetes.io/projected/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-kube-api-access-zq2gr\") pod \"community-operators-bk22q\" (UID: \"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c\") " pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:57:49 crc kubenswrapper[4954]: I1128 16:57:49.298129 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:57:49 crc kubenswrapper[4954]: I1128 16:57:49.795045 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bk22q"] Nov 28 16:57:50 crc kubenswrapper[4954]: I1128 16:57:50.070473 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bk22q" event={"ID":"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c","Type":"ContainerStarted","Data":"9357962e64dcdc221aa46f6ced227bf5558149958494caacfe77e9259f6faf23"} Nov 28 16:57:51 crc kubenswrapper[4954]: I1128 16:57:51.078584 4954 generic.go:334] "Generic (PLEG): container finished" podID="f3d88b5a-5bb5-48b0-8bf2-5185c871a39c" containerID="f222b80271ea1107d9b6a48f1d6b8a06b566e15df92ab569a3c7e163963fed47" exitCode=0 Nov 28 16:57:51 crc kubenswrapper[4954]: I1128 16:57:51.081352 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bk22q" event={"ID":"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c","Type":"ContainerDied","Data":"f222b80271ea1107d9b6a48f1d6b8a06b566e15df92ab569a3c7e163963fed47"} Nov 28 16:57:53 crc kubenswrapper[4954]: I1128 16:57:53.097620 4954 generic.go:334] "Generic (PLEG): container finished" podID="f3d88b5a-5bb5-48b0-8bf2-5185c871a39c" containerID="f546cf02cba48f7c145075892a77101fdc88b3df1aad715ecf6464ca9a9cb453" exitCode=0 Nov 28 16:57:53 crc kubenswrapper[4954]: I1128 16:57:53.097931 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bk22q" event={"ID":"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c","Type":"ContainerDied","Data":"f546cf02cba48f7c145075892a77101fdc88b3df1aad715ecf6464ca9a9cb453"} Nov 28 16:57:54 crc kubenswrapper[4954]: I1128 16:57:54.110586 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bk22q" event={"ID":"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c","Type":"ContainerStarted","Data":"39ce2b69f781279cd20ca9385d462bc75d2428f8581224eac8e5b0b8bcf2cdc1"} Nov 28 16:57:54 crc kubenswrapper[4954]: I1128 16:57:54.138194 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bk22q" podStartSLOduration=3.389450106 podStartE2EDuration="6.138170656s" podCreationTimestamp="2025-11-28 16:57:48 +0000 UTC" firstStartedPulling="2025-11-28 16:57:51.081926227 +0000 UTC m=+2824.473594768" lastFinishedPulling="2025-11-28 16:57:53.830646777 +0000 UTC m=+2827.222315318" observedRunningTime="2025-11-28 16:57:54.136638718 +0000 UTC m=+2827.528307259" watchObservedRunningTime="2025-11-28 16:57:54.138170656 +0000 UTC m=+2827.529839197" Nov 28 16:57:55 crc kubenswrapper[4954]: I1128 16:57:55.371258 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rb5pp"] Nov 28 16:57:55 crc kubenswrapper[4954]: I1128 16:57:55.389281 4954 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rb5pp"] Nov 28 16:57:55 crc kubenswrapper[4954]: I1128 16:57:55.389413 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:57:55 crc kubenswrapper[4954]: I1128 16:57:55.416412 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5facab11-0c1a-4675-a669-f686e86f989b-utilities\") pod \"certified-operators-rb5pp\" (UID: \"5facab11-0c1a-4675-a669-f686e86f989b\") " pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:57:55 crc kubenswrapper[4954]: I1128 16:57:55.416810 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6chb6\" (UniqueName: \"kubernetes.io/projected/5facab11-0c1a-4675-a669-f686e86f989b-kube-api-access-6chb6\") pod \"certified-operators-rb5pp\" (UID: \"5facab11-0c1a-4675-a669-f686e86f989b\") " pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:57:55 crc kubenswrapper[4954]: I1128 16:57:55.417046 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5facab11-0c1a-4675-a669-f686e86f989b-catalog-content\") pod \"certified-operators-rb5pp\" (UID: \"5facab11-0c1a-4675-a669-f686e86f989b\") " pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:57:55 crc kubenswrapper[4954]: I1128 16:57:55.518066 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5facab11-0c1a-4675-a669-f686e86f989b-catalog-content\") pod \"certified-operators-rb5pp\" (UID: \"5facab11-0c1a-4675-a669-f686e86f989b\") " pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:57:55 crc kubenswrapper[4954]: I1128 16:57:55.518134 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5facab11-0c1a-4675-a669-f686e86f989b-utilities\") pod \"certified-operators-rb5pp\" (UID: \"5facab11-0c1a-4675-a669-f686e86f989b\") " pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:57:55 crc kubenswrapper[4954]: I1128 16:57:55.518164 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6chb6\" (UniqueName: \"kubernetes.io/projected/5facab11-0c1a-4675-a669-f686e86f989b-kube-api-access-6chb6\") pod \"certified-operators-rb5pp\" (UID: \"5facab11-0c1a-4675-a669-f686e86f989b\") " pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:57:55 crc kubenswrapper[4954]: I1128 16:57:55.518904 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5facab11-0c1a-4675-a669-f686e86f989b-utilities\") pod \"certified-operators-rb5pp\" (UID: \"5facab11-0c1a-4675-a669-f686e86f989b\") " pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:57:55 crc kubenswrapper[4954]: I1128 16:57:55.519288 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5facab11-0c1a-4675-a669-f686e86f989b-catalog-content\") pod \"certified-operators-rb5pp\" (UID: \"5facab11-0c1a-4675-a669-f686e86f989b\") " pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:57:55 crc kubenswrapper[4954]: I1128 
16:57:55.538553 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6chb6\" (UniqueName: \"kubernetes.io/projected/5facab11-0c1a-4675-a669-f686e86f989b-kube-api-access-6chb6\") pod \"certified-operators-rb5pp\" (UID: \"5facab11-0c1a-4675-a669-f686e86f989b\") " pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:57:55 crc kubenswrapper[4954]: I1128 16:57:55.723225 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:57:56 crc kubenswrapper[4954]: I1128 16:57:56.239598 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rb5pp"] Nov 28 16:57:57 crc kubenswrapper[4954]: I1128 16:57:57.131494 4954 generic.go:334] "Generic (PLEG): container finished" podID="5facab11-0c1a-4675-a669-f686e86f989b" containerID="e3faa502d14c036f629106e9fd39c17e4001f4efec4f539f49a2fba7fa4dc53e" exitCode=0 Nov 28 16:57:57 crc kubenswrapper[4954]: I1128 16:57:57.131557 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rb5pp" event={"ID":"5facab11-0c1a-4675-a669-f686e86f989b","Type":"ContainerDied","Data":"e3faa502d14c036f629106e9fd39c17e4001f4efec4f539f49a2fba7fa4dc53e"} Nov 28 16:57:57 crc kubenswrapper[4954]: I1128 16:57:57.131582 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rb5pp" event={"ID":"5facab11-0c1a-4675-a669-f686e86f989b","Type":"ContainerStarted","Data":"e3ce8cdc56c13b6d7a70dba6a9339b00df0f24ff4a5742dfe8b63e796d6e5672"} Nov 28 16:57:59 crc kubenswrapper[4954]: I1128 16:57:59.144851 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rb5pp" event={"ID":"5facab11-0c1a-4675-a669-f686e86f989b","Type":"ContainerStarted","Data":"5a748148f092ea94f1bcaeb19cfad93c48e145ccccf03d655a3170647278d520"} Nov 28 16:57:59 crc kubenswrapper[4954]: I1128 16:57:59.299154 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:57:59 crc kubenswrapper[4954]: I1128 16:57:59.299723 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:57:59 crc kubenswrapper[4954]: I1128 16:57:59.343637 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:58:00 crc kubenswrapper[4954]: I1128 16:58:00.153477 4954 generic.go:334] "Generic (PLEG): container finished" podID="5facab11-0c1a-4675-a669-f686e86f989b" containerID="5a748148f092ea94f1bcaeb19cfad93c48e145ccccf03d655a3170647278d520" exitCode=0 Nov 28 16:58:00 crc kubenswrapper[4954]: I1128 16:58:00.153562 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rb5pp" event={"ID":"5facab11-0c1a-4675-a669-f686e86f989b","Type":"ContainerDied","Data":"5a748148f092ea94f1bcaeb19cfad93c48e145ccccf03d655a3170647278d520"} Nov 28 16:58:00 crc kubenswrapper[4954]: I1128 16:58:00.198177 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:58:00 crc kubenswrapper[4954]: I1128 16:58:00.760258 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bk22q"] Nov 28 16:58:01 crc kubenswrapper[4954]: I1128 16:58:01.165200 4954 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-marketplace/certified-operators-rb5pp" event={"ID":"5facab11-0c1a-4675-a669-f686e86f989b","Type":"ContainerStarted","Data":"ffc98c064a8bfa26dc2b084da0a1b244ad1778bfbf70249a2876b7d88582b837"} Nov 28 16:58:01 crc kubenswrapper[4954]: I1128 16:58:01.190100 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rb5pp" podStartSLOduration=2.559710187 podStartE2EDuration="6.190047223s" podCreationTimestamp="2025-11-28 16:57:55 +0000 UTC" firstStartedPulling="2025-11-28 16:57:57.134219508 +0000 UTC m=+2830.525888049" lastFinishedPulling="2025-11-28 16:58:00.764556504 +0000 UTC m=+2834.156225085" observedRunningTime="2025-11-28 16:58:01.187270375 +0000 UTC m=+2834.578938936" watchObservedRunningTime="2025-11-28 16:58:01.190047223 +0000 UTC m=+2834.581715764" Nov 28 16:58:02 crc kubenswrapper[4954]: I1128 16:58:02.172544 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bk22q" podUID="f3d88b5a-5bb5-48b0-8bf2-5185c871a39c" containerName="registry-server" containerID="cri-o://39ce2b69f781279cd20ca9385d462bc75d2428f8581224eac8e5b0b8bcf2cdc1" gracePeriod=2 Nov 28 16:58:02 crc kubenswrapper[4954]: I1128 16:58:02.480795 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:58:02 crc kubenswrapper[4954]: I1128 16:58:02.481074 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:58:02 crc kubenswrapper[4954]: I1128 16:58:02.481118 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 16:58:02 crc kubenswrapper[4954]: I1128 16:58:02.481675 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4310770654c95c53af0403e7697612204f526223706a7050b7dbdb48cdbd9ff2"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:58:02 crc kubenswrapper[4954]: I1128 16:58:02.481717 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://4310770654c95c53af0403e7697612204f526223706a7050b7dbdb48cdbd9ff2" gracePeriod=600 Nov 28 16:58:02 crc kubenswrapper[4954]: I1128 16:58:02.614559 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:58:02 crc kubenswrapper[4954]: I1128 16:58:02.815301 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zq2gr\" (UniqueName: \"kubernetes.io/projected/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-kube-api-access-zq2gr\") pod \"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c\" (UID: \"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c\") " Nov 28 16:58:02 crc kubenswrapper[4954]: I1128 16:58:02.815343 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-catalog-content\") pod \"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c\" (UID: \"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c\") " Nov 28 16:58:02 crc kubenswrapper[4954]: I1128 16:58:02.815426 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-utilities\") pod \"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c\" (UID: \"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c\") " Nov 28 16:58:02 crc kubenswrapper[4954]: I1128 16:58:02.816557 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-utilities" (OuterVolumeSpecName: "utilities") pod "f3d88b5a-5bb5-48b0-8bf2-5185c871a39c" (UID: "f3d88b5a-5bb5-48b0-8bf2-5185c871a39c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:58:02 crc kubenswrapper[4954]: I1128 16:58:02.821329 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-kube-api-access-zq2gr" (OuterVolumeSpecName: "kube-api-access-zq2gr") pod "f3d88b5a-5bb5-48b0-8bf2-5185c871a39c" (UID: "f3d88b5a-5bb5-48b0-8bf2-5185c871a39c"). InnerVolumeSpecName "kube-api-access-zq2gr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:58:02 crc kubenswrapper[4954]: I1128 16:58:02.916622 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zq2gr\" (UniqueName: \"kubernetes.io/projected/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-kube-api-access-zq2gr\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:02 crc kubenswrapper[4954]: I1128 16:58:02.916842 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:02 crc kubenswrapper[4954]: I1128 16:58:02.963999 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f3d88b5a-5bb5-48b0-8bf2-5185c871a39c" (UID: "f3d88b5a-5bb5-48b0-8bf2-5185c871a39c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.017618 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.182327 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="4310770654c95c53af0403e7697612204f526223706a7050b7dbdb48cdbd9ff2" exitCode=0 Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.182385 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"4310770654c95c53af0403e7697612204f526223706a7050b7dbdb48cdbd9ff2"} Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.182414 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7"} Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.182432 4954 scope.go:117] "RemoveContainer" containerID="382959d443698ee9c37ac74cee6824211f926da2470b6b447cb41d84511c62ba" Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.185582 4954 generic.go:334] "Generic (PLEG): container finished" podID="f3d88b5a-5bb5-48b0-8bf2-5185c871a39c" containerID="39ce2b69f781279cd20ca9385d462bc75d2428f8581224eac8e5b0b8bcf2cdc1" exitCode=0 Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.185630 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bk22q" Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.185623 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bk22q" event={"ID":"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c","Type":"ContainerDied","Data":"39ce2b69f781279cd20ca9385d462bc75d2428f8581224eac8e5b0b8bcf2cdc1"} Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.186139 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bk22q" event={"ID":"f3d88b5a-5bb5-48b0-8bf2-5185c871a39c","Type":"ContainerDied","Data":"9357962e64dcdc221aa46f6ced227bf5558149958494caacfe77e9259f6faf23"} Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.216287 4954 scope.go:117] "RemoveContainer" containerID="39ce2b69f781279cd20ca9385d462bc75d2428f8581224eac8e5b0b8bcf2cdc1" Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.239404 4954 scope.go:117] "RemoveContainer" containerID="f546cf02cba48f7c145075892a77101fdc88b3df1aad715ecf6464ca9a9cb453" Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.239754 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bk22q"] Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.249391 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bk22q"] Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.256499 4954 scope.go:117] "RemoveContainer" containerID="f222b80271ea1107d9b6a48f1d6b8a06b566e15df92ab569a3c7e163963fed47" Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.271608 4954 scope.go:117] "RemoveContainer" containerID="39ce2b69f781279cd20ca9385d462bc75d2428f8581224eac8e5b0b8bcf2cdc1" Nov 28 16:58:03 crc kubenswrapper[4954]: E1128 16:58:03.272089 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39ce2b69f781279cd20ca9385d462bc75d2428f8581224eac8e5b0b8bcf2cdc1\": container with ID starting with 39ce2b69f781279cd20ca9385d462bc75d2428f8581224eac8e5b0b8bcf2cdc1 not found: ID does not exist" containerID="39ce2b69f781279cd20ca9385d462bc75d2428f8581224eac8e5b0b8bcf2cdc1" Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.272139 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39ce2b69f781279cd20ca9385d462bc75d2428f8581224eac8e5b0b8bcf2cdc1"} err="failed to get container status \"39ce2b69f781279cd20ca9385d462bc75d2428f8581224eac8e5b0b8bcf2cdc1\": rpc error: code = NotFound desc = could not find container \"39ce2b69f781279cd20ca9385d462bc75d2428f8581224eac8e5b0b8bcf2cdc1\": container with ID starting with 39ce2b69f781279cd20ca9385d462bc75d2428f8581224eac8e5b0b8bcf2cdc1 not found: ID does not exist" Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.272203 4954 scope.go:117] "RemoveContainer" containerID="f546cf02cba48f7c145075892a77101fdc88b3df1aad715ecf6464ca9a9cb453" Nov 28 16:58:03 crc kubenswrapper[4954]: E1128 16:58:03.272599 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f546cf02cba48f7c145075892a77101fdc88b3df1aad715ecf6464ca9a9cb453\": container with ID starting with f546cf02cba48f7c145075892a77101fdc88b3df1aad715ecf6464ca9a9cb453 not found: ID does not exist" containerID="f546cf02cba48f7c145075892a77101fdc88b3df1aad715ecf6464ca9a9cb453" Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.272638 4954 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f546cf02cba48f7c145075892a77101fdc88b3df1aad715ecf6464ca9a9cb453"} err="failed to get container status \"f546cf02cba48f7c145075892a77101fdc88b3df1aad715ecf6464ca9a9cb453\": rpc error: code = NotFound desc = could not find container \"f546cf02cba48f7c145075892a77101fdc88b3df1aad715ecf6464ca9a9cb453\": container with ID starting with f546cf02cba48f7c145075892a77101fdc88b3df1aad715ecf6464ca9a9cb453 not found: ID does not exist" Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.272658 4954 scope.go:117] "RemoveContainer" containerID="f222b80271ea1107d9b6a48f1d6b8a06b566e15df92ab569a3c7e163963fed47" Nov 28 16:58:03 crc kubenswrapper[4954]: E1128 16:58:03.272966 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f222b80271ea1107d9b6a48f1d6b8a06b566e15df92ab569a3c7e163963fed47\": container with ID starting with f222b80271ea1107d9b6a48f1d6b8a06b566e15df92ab569a3c7e163963fed47 not found: ID does not exist" containerID="f222b80271ea1107d9b6a48f1d6b8a06b566e15df92ab569a3c7e163963fed47" Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.273006 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f222b80271ea1107d9b6a48f1d6b8a06b566e15df92ab569a3c7e163963fed47"} err="failed to get container status \"f222b80271ea1107d9b6a48f1d6b8a06b566e15df92ab569a3c7e163963fed47\": rpc error: code = NotFound desc = could not find container \"f222b80271ea1107d9b6a48f1d6b8a06b566e15df92ab569a3c7e163963fed47\": container with ID starting with f222b80271ea1107d9b6a48f1d6b8a06b566e15df92ab569a3c7e163963fed47 not found: ID does not exist" Nov 28 16:58:03 crc kubenswrapper[4954]: I1128 16:58:03.868056 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3d88b5a-5bb5-48b0-8bf2-5185c871a39c" path="/var/lib/kubelet/pods/f3d88b5a-5bb5-48b0-8bf2-5185c871a39c/volumes" Nov 28 16:58:05 crc kubenswrapper[4954]: I1128 16:58:05.723460 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:58:05 crc kubenswrapper[4954]: I1128 16:58:05.723959 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:58:05 crc kubenswrapper[4954]: I1128 16:58:05.798360 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:58:06 crc kubenswrapper[4954]: I1128 16:58:06.251733 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:58:06 crc kubenswrapper[4954]: I1128 16:58:06.748308 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rb5pp"] Nov 28 16:58:08 crc kubenswrapper[4954]: I1128 16:58:08.223748 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rb5pp" podUID="5facab11-0c1a-4675-a669-f686e86f989b" containerName="registry-server" containerID="cri-o://ffc98c064a8bfa26dc2b084da0a1b244ad1778bfbf70249a2876b7d88582b837" gracePeriod=2 Nov 28 16:58:08 crc kubenswrapper[4954]: I1128 16:58:08.626373 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:58:08 crc kubenswrapper[4954]: I1128 16:58:08.716028 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5facab11-0c1a-4675-a669-f686e86f989b-utilities\") pod \"5facab11-0c1a-4675-a669-f686e86f989b\" (UID: \"5facab11-0c1a-4675-a669-f686e86f989b\") " Nov 28 16:58:08 crc kubenswrapper[4954]: I1128 16:58:08.716271 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6chb6\" (UniqueName: \"kubernetes.io/projected/5facab11-0c1a-4675-a669-f686e86f989b-kube-api-access-6chb6\") pod \"5facab11-0c1a-4675-a669-f686e86f989b\" (UID: \"5facab11-0c1a-4675-a669-f686e86f989b\") " Nov 28 16:58:08 crc kubenswrapper[4954]: I1128 16:58:08.716315 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5facab11-0c1a-4675-a669-f686e86f989b-catalog-content\") pod \"5facab11-0c1a-4675-a669-f686e86f989b\" (UID: \"5facab11-0c1a-4675-a669-f686e86f989b\") " Nov 28 16:58:08 crc kubenswrapper[4954]: I1128 16:58:08.717167 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5facab11-0c1a-4675-a669-f686e86f989b-utilities" (OuterVolumeSpecName: "utilities") pod "5facab11-0c1a-4675-a669-f686e86f989b" (UID: "5facab11-0c1a-4675-a669-f686e86f989b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:58:08 crc kubenswrapper[4954]: I1128 16:58:08.725691 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5facab11-0c1a-4675-a669-f686e86f989b-kube-api-access-6chb6" (OuterVolumeSpecName: "kube-api-access-6chb6") pod "5facab11-0c1a-4675-a669-f686e86f989b" (UID: "5facab11-0c1a-4675-a669-f686e86f989b"). InnerVolumeSpecName "kube-api-access-6chb6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:58:08 crc kubenswrapper[4954]: I1128 16:58:08.790277 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5facab11-0c1a-4675-a669-f686e86f989b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5facab11-0c1a-4675-a669-f686e86f989b" (UID: "5facab11-0c1a-4675-a669-f686e86f989b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:58:08 crc kubenswrapper[4954]: I1128 16:58:08.818506 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5facab11-0c1a-4675-a669-f686e86f989b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:08 crc kubenswrapper[4954]: I1128 16:58:08.818597 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6chb6\" (UniqueName: \"kubernetes.io/projected/5facab11-0c1a-4675-a669-f686e86f989b-kube-api-access-6chb6\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:08 crc kubenswrapper[4954]: I1128 16:58:08.818619 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5facab11-0c1a-4675-a669-f686e86f989b-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.238153 4954 generic.go:334] "Generic (PLEG): container finished" podID="5facab11-0c1a-4675-a669-f686e86f989b" containerID="ffc98c064a8bfa26dc2b084da0a1b244ad1778bfbf70249a2876b7d88582b837" exitCode=0 Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.238213 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rb5pp" event={"ID":"5facab11-0c1a-4675-a669-f686e86f989b","Type":"ContainerDied","Data":"ffc98c064a8bfa26dc2b084da0a1b244ad1778bfbf70249a2876b7d88582b837"} Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.238244 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rb5pp" event={"ID":"5facab11-0c1a-4675-a669-f686e86f989b","Type":"ContainerDied","Data":"e3ce8cdc56c13b6d7a70dba6a9339b00df0f24ff4a5742dfe8b63e796d6e5672"} Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.238265 4954 scope.go:117] "RemoveContainer" containerID="ffc98c064a8bfa26dc2b084da0a1b244ad1778bfbf70249a2876b7d88582b837" Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.238419 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rb5pp" Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.266684 4954 scope.go:117] "RemoveContainer" containerID="5a748148f092ea94f1bcaeb19cfad93c48e145ccccf03d655a3170647278d520" Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.271000 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rb5pp"] Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.276231 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rb5pp"] Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.300931 4954 scope.go:117] "RemoveContainer" containerID="e3faa502d14c036f629106e9fd39c17e4001f4efec4f539f49a2fba7fa4dc53e" Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.317801 4954 scope.go:117] "RemoveContainer" containerID="ffc98c064a8bfa26dc2b084da0a1b244ad1778bfbf70249a2876b7d88582b837" Nov 28 16:58:09 crc kubenswrapper[4954]: E1128 16:58:09.318190 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffc98c064a8bfa26dc2b084da0a1b244ad1778bfbf70249a2876b7d88582b837\": container with ID starting with ffc98c064a8bfa26dc2b084da0a1b244ad1778bfbf70249a2876b7d88582b837 not found: ID does not exist" containerID="ffc98c064a8bfa26dc2b084da0a1b244ad1778bfbf70249a2876b7d88582b837" Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.318228 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffc98c064a8bfa26dc2b084da0a1b244ad1778bfbf70249a2876b7d88582b837"} err="failed to get container status \"ffc98c064a8bfa26dc2b084da0a1b244ad1778bfbf70249a2876b7d88582b837\": rpc error: code = NotFound desc = could not find container \"ffc98c064a8bfa26dc2b084da0a1b244ad1778bfbf70249a2876b7d88582b837\": container with ID starting with ffc98c064a8bfa26dc2b084da0a1b244ad1778bfbf70249a2876b7d88582b837 not found: ID does not exist" Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.318254 4954 scope.go:117] "RemoveContainer" containerID="5a748148f092ea94f1bcaeb19cfad93c48e145ccccf03d655a3170647278d520" Nov 28 16:58:09 crc kubenswrapper[4954]: E1128 16:58:09.318587 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a748148f092ea94f1bcaeb19cfad93c48e145ccccf03d655a3170647278d520\": container with ID starting with 5a748148f092ea94f1bcaeb19cfad93c48e145ccccf03d655a3170647278d520 not found: ID does not exist" containerID="5a748148f092ea94f1bcaeb19cfad93c48e145ccccf03d655a3170647278d520" Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.318622 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a748148f092ea94f1bcaeb19cfad93c48e145ccccf03d655a3170647278d520"} err="failed to get container status \"5a748148f092ea94f1bcaeb19cfad93c48e145ccccf03d655a3170647278d520\": rpc error: code = NotFound desc = could not find container \"5a748148f092ea94f1bcaeb19cfad93c48e145ccccf03d655a3170647278d520\": container with ID starting with 5a748148f092ea94f1bcaeb19cfad93c48e145ccccf03d655a3170647278d520 not found: ID does not exist" Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.318644 4954 scope.go:117] "RemoveContainer" containerID="e3faa502d14c036f629106e9fd39c17e4001f4efec4f539f49a2fba7fa4dc53e" Nov 28 16:58:09 crc kubenswrapper[4954]: E1128 16:58:09.318966 4954 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"e3faa502d14c036f629106e9fd39c17e4001f4efec4f539f49a2fba7fa4dc53e\": container with ID starting with e3faa502d14c036f629106e9fd39c17e4001f4efec4f539f49a2fba7fa4dc53e not found: ID does not exist" containerID="e3faa502d14c036f629106e9fd39c17e4001f4efec4f539f49a2fba7fa4dc53e" Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.318993 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3faa502d14c036f629106e9fd39c17e4001f4efec4f539f49a2fba7fa4dc53e"} err="failed to get container status \"e3faa502d14c036f629106e9fd39c17e4001f4efec4f539f49a2fba7fa4dc53e\": rpc error: code = NotFound desc = could not find container \"e3faa502d14c036f629106e9fd39c17e4001f4efec4f539f49a2fba7fa4dc53e\": container with ID starting with e3faa502d14c036f629106e9fd39c17e4001f4efec4f539f49a2fba7fa4dc53e not found: ID does not exist" Nov 28 16:58:09 crc kubenswrapper[4954]: I1128 16:58:09.867472 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5facab11-0c1a-4675-a669-f686e86f989b" path="/var/lib/kubelet/pods/5facab11-0c1a-4675-a669-f686e86f989b/volumes" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.152703 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2"] Nov 28 17:00:00 crc kubenswrapper[4954]: E1128 17:00:00.153563 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5facab11-0c1a-4675-a669-f686e86f989b" containerName="extract-content" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.153580 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="5facab11-0c1a-4675-a669-f686e86f989b" containerName="extract-content" Nov 28 17:00:00 crc kubenswrapper[4954]: E1128 17:00:00.153600 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3d88b5a-5bb5-48b0-8bf2-5185c871a39c" containerName="registry-server" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.153610 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3d88b5a-5bb5-48b0-8bf2-5185c871a39c" containerName="registry-server" Nov 28 17:00:00 crc kubenswrapper[4954]: E1128 17:00:00.153619 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3d88b5a-5bb5-48b0-8bf2-5185c871a39c" containerName="extract-utilities" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.153627 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3d88b5a-5bb5-48b0-8bf2-5185c871a39c" containerName="extract-utilities" Nov 28 17:00:00 crc kubenswrapper[4954]: E1128 17:00:00.153647 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5facab11-0c1a-4675-a669-f686e86f989b" containerName="registry-server" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.153654 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="5facab11-0c1a-4675-a669-f686e86f989b" containerName="registry-server" Nov 28 17:00:00 crc kubenswrapper[4954]: E1128 17:00:00.153671 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3d88b5a-5bb5-48b0-8bf2-5185c871a39c" containerName="extract-content" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.153678 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3d88b5a-5bb5-48b0-8bf2-5185c871a39c" containerName="extract-content" Nov 28 17:00:00 crc kubenswrapper[4954]: E1128 17:00:00.153689 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5facab11-0c1a-4675-a669-f686e86f989b" 
containerName="extract-utilities" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.153696 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="5facab11-0c1a-4675-a669-f686e86f989b" containerName="extract-utilities" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.153847 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3d88b5a-5bb5-48b0-8bf2-5185c871a39c" containerName="registry-server" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.153873 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="5facab11-0c1a-4675-a669-f686e86f989b" containerName="registry-server" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.154400 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.156915 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.162495 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.183936 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2"] Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.221054 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7cl4\" (UniqueName: \"kubernetes.io/projected/09be7fdb-9f4f-481c-b019-920da8db4953-kube-api-access-j7cl4\") pod \"collect-profiles-29405820-fq5x2\" (UID: \"09be7fdb-9f4f-481c-b019-920da8db4953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.221109 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/09be7fdb-9f4f-481c-b019-920da8db4953-secret-volume\") pod \"collect-profiles-29405820-fq5x2\" (UID: \"09be7fdb-9f4f-481c-b019-920da8db4953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.221147 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/09be7fdb-9f4f-481c-b019-920da8db4953-config-volume\") pod \"collect-profiles-29405820-fq5x2\" (UID: \"09be7fdb-9f4f-481c-b019-920da8db4953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.322620 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7cl4\" (UniqueName: \"kubernetes.io/projected/09be7fdb-9f4f-481c-b019-920da8db4953-kube-api-access-j7cl4\") pod \"collect-profiles-29405820-fq5x2\" (UID: \"09be7fdb-9f4f-481c-b019-920da8db4953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.322911 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/09be7fdb-9f4f-481c-b019-920da8db4953-secret-volume\") pod \"collect-profiles-29405820-fq5x2\" (UID: \"09be7fdb-9f4f-481c-b019-920da8db4953\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.323018 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/09be7fdb-9f4f-481c-b019-920da8db4953-config-volume\") pod \"collect-profiles-29405820-fq5x2\" (UID: \"09be7fdb-9f4f-481c-b019-920da8db4953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.324262 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/09be7fdb-9f4f-481c-b019-920da8db4953-config-volume\") pod \"collect-profiles-29405820-fq5x2\" (UID: \"09be7fdb-9f4f-481c-b019-920da8db4953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.337971 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/09be7fdb-9f4f-481c-b019-920da8db4953-secret-volume\") pod \"collect-profiles-29405820-fq5x2\" (UID: \"09be7fdb-9f4f-481c-b019-920da8db4953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.339461 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7cl4\" (UniqueName: \"kubernetes.io/projected/09be7fdb-9f4f-481c-b019-920da8db4953-kube-api-access-j7cl4\") pod \"collect-profiles-29405820-fq5x2\" (UID: \"09be7fdb-9f4f-481c-b019-920da8db4953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.476061 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.725941 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2"] Nov 28 17:00:00 crc kubenswrapper[4954]: I1128 17:00:00.836928 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" event={"ID":"09be7fdb-9f4f-481c-b019-920da8db4953","Type":"ContainerStarted","Data":"e3548e27d9b150eddea180ed0e473c1a6911c60de694394673866c1e1b0c457f"} Nov 28 17:00:01 crc kubenswrapper[4954]: I1128 17:00:01.847208 4954 generic.go:334] "Generic (PLEG): container finished" podID="09be7fdb-9f4f-481c-b019-920da8db4953" containerID="0253a1e1eb42199e436d5b053a847e53f792f4c8b9213d99aa184cfd7b86fbf3" exitCode=0 Nov 28 17:00:01 crc kubenswrapper[4954]: I1128 17:00:01.847508 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" event={"ID":"09be7fdb-9f4f-481c-b019-920da8db4953","Type":"ContainerDied","Data":"0253a1e1eb42199e436d5b053a847e53f792f4c8b9213d99aa184cfd7b86fbf3"} Nov 28 17:00:03 crc kubenswrapper[4954]: I1128 17:00:03.112075 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" Nov 28 17:00:03 crc kubenswrapper[4954]: I1128 17:00:03.165295 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/09be7fdb-9f4f-481c-b019-920da8db4953-config-volume\") pod \"09be7fdb-9f4f-481c-b019-920da8db4953\" (UID: \"09be7fdb-9f4f-481c-b019-920da8db4953\") " Nov 28 17:00:03 crc kubenswrapper[4954]: I1128 17:00:03.165349 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7cl4\" (UniqueName: \"kubernetes.io/projected/09be7fdb-9f4f-481c-b019-920da8db4953-kube-api-access-j7cl4\") pod \"09be7fdb-9f4f-481c-b019-920da8db4953\" (UID: \"09be7fdb-9f4f-481c-b019-920da8db4953\") " Nov 28 17:00:03 crc kubenswrapper[4954]: I1128 17:00:03.165373 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/09be7fdb-9f4f-481c-b019-920da8db4953-secret-volume\") pod \"09be7fdb-9f4f-481c-b019-920da8db4953\" (UID: \"09be7fdb-9f4f-481c-b019-920da8db4953\") " Nov 28 17:00:03 crc kubenswrapper[4954]: I1128 17:00:03.166240 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09be7fdb-9f4f-481c-b019-920da8db4953-config-volume" (OuterVolumeSpecName: "config-volume") pod "09be7fdb-9f4f-481c-b019-920da8db4953" (UID: "09be7fdb-9f4f-481c-b019-920da8db4953"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:00:03 crc kubenswrapper[4954]: I1128 17:00:03.171535 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09be7fdb-9f4f-481c-b019-920da8db4953-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "09be7fdb-9f4f-481c-b019-920da8db4953" (UID: "09be7fdb-9f4f-481c-b019-920da8db4953"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:00:03 crc kubenswrapper[4954]: I1128 17:00:03.173761 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09be7fdb-9f4f-481c-b019-920da8db4953-kube-api-access-j7cl4" (OuterVolumeSpecName: "kube-api-access-j7cl4") pod "09be7fdb-9f4f-481c-b019-920da8db4953" (UID: "09be7fdb-9f4f-481c-b019-920da8db4953"). InnerVolumeSpecName "kube-api-access-j7cl4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:00:03 crc kubenswrapper[4954]: I1128 17:00:03.266918 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7cl4\" (UniqueName: \"kubernetes.io/projected/09be7fdb-9f4f-481c-b019-920da8db4953-kube-api-access-j7cl4\") on node \"crc\" DevicePath \"\"" Nov 28 17:00:03 crc kubenswrapper[4954]: I1128 17:00:03.266969 4954 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/09be7fdb-9f4f-481c-b019-920da8db4953-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:00:03 crc kubenswrapper[4954]: I1128 17:00:03.266983 4954 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/09be7fdb-9f4f-481c-b019-920da8db4953-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:00:03 crc kubenswrapper[4954]: I1128 17:00:03.865663 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" Nov 28 17:00:03 crc kubenswrapper[4954]: I1128 17:00:03.872092 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-fq5x2" event={"ID":"09be7fdb-9f4f-481c-b019-920da8db4953","Type":"ContainerDied","Data":"e3548e27d9b150eddea180ed0e473c1a6911c60de694394673866c1e1b0c457f"} Nov 28 17:00:03 crc kubenswrapper[4954]: I1128 17:00:03.872168 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e3548e27d9b150eddea180ed0e473c1a6911c60de694394673866c1e1b0c457f" Nov 28 17:00:04 crc kubenswrapper[4954]: I1128 17:00:04.184160 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5"] Nov 28 17:00:04 crc kubenswrapper[4954]: I1128 17:00:04.189066 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-5plw5"] Nov 28 17:00:05 crc kubenswrapper[4954]: I1128 17:00:05.863539 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f729fc1-46e1-42a2-8def-3f831c28b8b0" path="/var/lib/kubelet/pods/5f729fc1-46e1-42a2-8def-3f831c28b8b0/volumes" Nov 28 17:00:32 crc kubenswrapper[4954]: I1128 17:00:32.480449 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:00:32 crc kubenswrapper[4954]: I1128 17:00:32.481230 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:01:02 crc kubenswrapper[4954]: I1128 17:01:02.480719 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:01:02 crc kubenswrapper[4954]: I1128 17:01:02.481241 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:01:05 crc kubenswrapper[4954]: I1128 17:01:05.542180 4954 scope.go:117] "RemoveContainer" containerID="5191d0199d9849d8b93b9c23f03cfb1f462b0caf1e5d3fbbbc7d7e4d2c739605" Nov 28 17:01:32 crc kubenswrapper[4954]: I1128 17:01:32.480811 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:01:32 crc kubenswrapper[4954]: I1128 17:01:32.481523 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" 
podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:01:32 crc kubenswrapper[4954]: I1128 17:01:32.481600 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 17:01:32 crc kubenswrapper[4954]: I1128 17:01:32.482334 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:01:32 crc kubenswrapper[4954]: I1128 17:01:32.482401 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" gracePeriod=600 Nov 28 17:01:32 crc kubenswrapper[4954]: I1128 17:01:32.805834 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" exitCode=0 Nov 28 17:01:32 crc kubenswrapper[4954]: I1128 17:01:32.806365 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7"} Nov 28 17:01:32 crc kubenswrapper[4954]: I1128 17:01:32.806432 4954 scope.go:117] "RemoveContainer" containerID="4310770654c95c53af0403e7697612204f526223706a7050b7dbdb48cdbd9ff2" Nov 28 17:01:32 crc kubenswrapper[4954]: E1128 17:01:32.944715 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:01:33 crc kubenswrapper[4954]: I1128 17:01:33.815781 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:01:33 crc kubenswrapper[4954]: E1128 17:01:33.816095 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:01:45 crc kubenswrapper[4954]: I1128 17:01:45.856733 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:01:45 crc kubenswrapper[4954]: E1128 17:01:45.857489 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:01:57 crc kubenswrapper[4954]: I1128 17:01:57.860834 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:01:57 crc kubenswrapper[4954]: E1128 17:01:57.861603 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:02:13 crc kubenswrapper[4954]: I1128 17:02:13.856863 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:02:13 crc kubenswrapper[4954]: E1128 17:02:13.857774 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:02:27 crc kubenswrapper[4954]: I1128 17:02:27.862716 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:02:27 crc kubenswrapper[4954]: E1128 17:02:27.864235 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:02:42 crc kubenswrapper[4954]: I1128 17:02:42.856563 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:02:42 crc kubenswrapper[4954]: E1128 17:02:42.858481 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:02:54 crc kubenswrapper[4954]: I1128 17:02:54.856708 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:02:54 crc kubenswrapper[4954]: E1128 17:02:54.857712 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:03:08 crc kubenswrapper[4954]: I1128 17:03:08.857046 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:03:08 crc kubenswrapper[4954]: E1128 17:03:08.857700 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:03:23 crc kubenswrapper[4954]: I1128 17:03:23.856217 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:03:23 crc kubenswrapper[4954]: E1128 17:03:23.857093 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:03:37 crc kubenswrapper[4954]: I1128 17:03:37.860285 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:03:37 crc kubenswrapper[4954]: E1128 17:03:37.861087 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:03:51 crc kubenswrapper[4954]: I1128 17:03:51.856367 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:03:51 crc kubenswrapper[4954]: E1128 17:03:51.857104 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:04:04 crc kubenswrapper[4954]: I1128 17:04:04.856915 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:04:04 crc kubenswrapper[4954]: E1128 17:04:04.857903 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:04:18 crc kubenswrapper[4954]: I1128 17:04:18.856463 4954 
scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:04:18 crc kubenswrapper[4954]: E1128 17:04:18.857480 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:04:29 crc kubenswrapper[4954]: I1128 17:04:29.855794 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:04:29 crc kubenswrapper[4954]: E1128 17:04:29.856984 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:04:40 crc kubenswrapper[4954]: I1128 17:04:40.855770 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:04:40 crc kubenswrapper[4954]: E1128 17:04:40.856701 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:04:52 crc kubenswrapper[4954]: I1128 17:04:52.855945 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:04:52 crc kubenswrapper[4954]: E1128 17:04:52.856933 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:05:06 crc kubenswrapper[4954]: I1128 17:05:06.857415 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:05:06 crc kubenswrapper[4954]: E1128 17:05:06.858880 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:05:18 crc kubenswrapper[4954]: I1128 17:05:18.856368 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:05:18 crc kubenswrapper[4954]: E1128 17:05:18.857354 4954 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:05:30 crc kubenswrapper[4954]: I1128 17:05:30.856801 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:05:30 crc kubenswrapper[4954]: E1128 17:05:30.858071 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:05:42 crc kubenswrapper[4954]: I1128 17:05:42.859750 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:05:42 crc kubenswrapper[4954]: E1128 17:05:42.862725 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:05:53 crc kubenswrapper[4954]: I1128 17:05:53.856411 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:05:53 crc kubenswrapper[4954]: E1128 17:05:53.857350 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:06:05 crc kubenswrapper[4954]: I1128 17:06:05.856690 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:06:05 crc kubenswrapper[4954]: E1128 17:06:05.858324 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:06:16 crc kubenswrapper[4954]: I1128 17:06:16.857393 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:06:16 crc kubenswrapper[4954]: E1128 17:06:16.859070 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:06:29 crc kubenswrapper[4954]: I1128 17:06:29.857020 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:06:29 crc kubenswrapper[4954]: E1128 17:06:29.859333 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.104367 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mls2m"] Nov 28 17:06:34 crc kubenswrapper[4954]: E1128 17:06:34.105456 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09be7fdb-9f4f-481c-b019-920da8db4953" containerName="collect-profiles" Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.105475 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="09be7fdb-9f4f-481c-b019-920da8db4953" containerName="collect-profiles" Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.105722 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="09be7fdb-9f4f-481c-b019-920da8db4953" containerName="collect-profiles" Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.106984 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mls2m" Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.121695 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mls2m"] Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.302592 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gngmp\" (UniqueName: \"kubernetes.io/projected/14241127-315f-4637-82e7-6b2e98684240-kube-api-access-gngmp\") pod \"redhat-marketplace-mls2m\" (UID: \"14241127-315f-4637-82e7-6b2e98684240\") " pod="openshift-marketplace/redhat-marketplace-mls2m" Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.302649 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14241127-315f-4637-82e7-6b2e98684240-catalog-content\") pod \"redhat-marketplace-mls2m\" (UID: \"14241127-315f-4637-82e7-6b2e98684240\") " pod="openshift-marketplace/redhat-marketplace-mls2m" Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.302693 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14241127-315f-4637-82e7-6b2e98684240-utilities\") pod \"redhat-marketplace-mls2m\" (UID: \"14241127-315f-4637-82e7-6b2e98684240\") " pod="openshift-marketplace/redhat-marketplace-mls2m" Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.404198 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gngmp\" (UniqueName: \"kubernetes.io/projected/14241127-315f-4637-82e7-6b2e98684240-kube-api-access-gngmp\") pod \"redhat-marketplace-mls2m\" (UID: \"14241127-315f-4637-82e7-6b2e98684240\") " pod="openshift-marketplace/redhat-marketplace-mls2m" Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.404635 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14241127-315f-4637-82e7-6b2e98684240-catalog-content\") pod \"redhat-marketplace-mls2m\" (UID: \"14241127-315f-4637-82e7-6b2e98684240\") " pod="openshift-marketplace/redhat-marketplace-mls2m" Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.404772 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14241127-315f-4637-82e7-6b2e98684240-utilities\") pod \"redhat-marketplace-mls2m\" (UID: \"14241127-315f-4637-82e7-6b2e98684240\") " pod="openshift-marketplace/redhat-marketplace-mls2m" Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.405309 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14241127-315f-4637-82e7-6b2e98684240-utilities\") pod \"redhat-marketplace-mls2m\" (UID: \"14241127-315f-4637-82e7-6b2e98684240\") " pod="openshift-marketplace/redhat-marketplace-mls2m" Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.405316 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14241127-315f-4637-82e7-6b2e98684240-catalog-content\") pod \"redhat-marketplace-mls2m\" (UID: \"14241127-315f-4637-82e7-6b2e98684240\") " pod="openshift-marketplace/redhat-marketplace-mls2m" Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.430518 4954 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-gngmp\" (UniqueName: \"kubernetes.io/projected/14241127-315f-4637-82e7-6b2e98684240-kube-api-access-gngmp\") pod \"redhat-marketplace-mls2m\" (UID: \"14241127-315f-4637-82e7-6b2e98684240\") " pod="openshift-marketplace/redhat-marketplace-mls2m" Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.440391 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mls2m" Nov 28 17:06:34 crc kubenswrapper[4954]: I1128 17:06:34.776732 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mls2m"] Nov 28 17:06:35 crc kubenswrapper[4954]: I1128 17:06:35.266998 4954 generic.go:334] "Generic (PLEG): container finished" podID="14241127-315f-4637-82e7-6b2e98684240" containerID="6363d5f3293ad1ce1dd380c9d336c5f8262e21dc03de63086b5405e17e3c1f57" exitCode=0 Nov 28 17:06:35 crc kubenswrapper[4954]: I1128 17:06:35.267099 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mls2m" event={"ID":"14241127-315f-4637-82e7-6b2e98684240","Type":"ContainerDied","Data":"6363d5f3293ad1ce1dd380c9d336c5f8262e21dc03de63086b5405e17e3c1f57"} Nov 28 17:06:35 crc kubenswrapper[4954]: I1128 17:06:35.267314 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mls2m" event={"ID":"14241127-315f-4637-82e7-6b2e98684240","Type":"ContainerStarted","Data":"5897d516ec1627c649c200db1213787cc968fe3ee2b2396d8cd96b90d48169c5"} Nov 28 17:06:35 crc kubenswrapper[4954]: I1128 17:06:35.269747 4954 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:06:39 crc kubenswrapper[4954]: I1128 17:06:39.298336 4954 generic.go:334] "Generic (PLEG): container finished" podID="14241127-315f-4637-82e7-6b2e98684240" containerID="3036f4583749743114a4c6ce3aa3dcc14f05fa4b204367783ac6682b8505ff02" exitCode=0 Nov 28 17:06:39 crc kubenswrapper[4954]: I1128 17:06:39.298374 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mls2m" event={"ID":"14241127-315f-4637-82e7-6b2e98684240","Type":"ContainerDied","Data":"3036f4583749743114a4c6ce3aa3dcc14f05fa4b204367783ac6682b8505ff02"} Nov 28 17:06:40 crc kubenswrapper[4954]: I1128 17:06:40.308802 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mls2m" event={"ID":"14241127-315f-4637-82e7-6b2e98684240","Type":"ContainerStarted","Data":"8008341f2d96a6d83c0f13a73daaebef59bd3a86673dc2a95a3e609dcfb203e6"} Nov 28 17:06:40 crc kubenswrapper[4954]: I1128 17:06:40.359684 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mls2m" podStartSLOduration=1.6027522840000001 podStartE2EDuration="6.359664535s" podCreationTimestamp="2025-11-28 17:06:34 +0000 UTC" firstStartedPulling="2025-11-28 17:06:35.269263262 +0000 UTC m=+3348.660931833" lastFinishedPulling="2025-11-28 17:06:40.026175543 +0000 UTC m=+3353.417844084" observedRunningTime="2025-11-28 17:06:40.353702168 +0000 UTC m=+3353.745370719" watchObservedRunningTime="2025-11-28 17:06:40.359664535 +0000 UTC m=+3353.751333077" Nov 28 17:06:43 crc kubenswrapper[4954]: I1128 17:06:43.857317 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:06:44 crc kubenswrapper[4954]: I1128 17:06:44.440605 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mls2m" Nov 28 17:06:44 crc kubenswrapper[4954]: I1128 17:06:44.440923 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mls2m" Nov 28 17:06:44 crc kubenswrapper[4954]: I1128 17:06:44.490759 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mls2m" Nov 28 17:06:45 crc kubenswrapper[4954]: I1128 17:06:45.352452 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"1a8bfeb8d720a6ac2f6290b0da83f4883622060108336058378d22153c6d04e4"} Nov 28 17:06:45 crc kubenswrapper[4954]: I1128 17:06:45.446510 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mls2m" Nov 28 17:06:45 crc kubenswrapper[4954]: I1128 17:06:45.524212 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mls2m"] Nov 28 17:06:45 crc kubenswrapper[4954]: I1128 17:06:45.571008 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n2lg2"] Nov 28 17:06:45 crc kubenswrapper[4954]: I1128 17:06:45.571266 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-n2lg2" podUID="ae51cec8-1ee7-4bb7-bc2b-09763a11aada" containerName="registry-server" containerID="cri-o://70a2eae1934b41c3d4516e6c76a77e4aa553399624c925f7a3bd6c885791df47" gracePeriod=2 Nov 28 17:06:46 crc kubenswrapper[4954]: I1128 17:06:46.361258 4954 generic.go:334] "Generic (PLEG): container finished" podID="ae51cec8-1ee7-4bb7-bc2b-09763a11aada" containerID="70a2eae1934b41c3d4516e6c76a77e4aa553399624c925f7a3bd6c885791df47" exitCode=0 Nov 28 17:06:46 crc kubenswrapper[4954]: I1128 17:06:46.361349 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n2lg2" event={"ID":"ae51cec8-1ee7-4bb7-bc2b-09763a11aada","Type":"ContainerDied","Data":"70a2eae1934b41c3d4516e6c76a77e4aa553399624c925f7a3bd6c885791df47"} Nov 28 17:06:46 crc kubenswrapper[4954]: I1128 17:06:46.552778 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 17:06:46 crc kubenswrapper[4954]: I1128 17:06:46.700547 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ljwsn\" (UniqueName: \"kubernetes.io/projected/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-kube-api-access-ljwsn\") pod \"ae51cec8-1ee7-4bb7-bc2b-09763a11aada\" (UID: \"ae51cec8-1ee7-4bb7-bc2b-09763a11aada\") " Nov 28 17:06:46 crc kubenswrapper[4954]: I1128 17:06:46.700630 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-utilities\") pod \"ae51cec8-1ee7-4bb7-bc2b-09763a11aada\" (UID: \"ae51cec8-1ee7-4bb7-bc2b-09763a11aada\") " Nov 28 17:06:46 crc kubenswrapper[4954]: I1128 17:06:46.700854 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-catalog-content\") pod \"ae51cec8-1ee7-4bb7-bc2b-09763a11aada\" (UID: \"ae51cec8-1ee7-4bb7-bc2b-09763a11aada\") " Nov 28 17:06:46 crc kubenswrapper[4954]: I1128 17:06:46.701188 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-utilities" (OuterVolumeSpecName: "utilities") pod "ae51cec8-1ee7-4bb7-bc2b-09763a11aada" (UID: "ae51cec8-1ee7-4bb7-bc2b-09763a11aada"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:06:46 crc kubenswrapper[4954]: I1128 17:06:46.710786 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-kube-api-access-ljwsn" (OuterVolumeSpecName: "kube-api-access-ljwsn") pod "ae51cec8-1ee7-4bb7-bc2b-09763a11aada" (UID: "ae51cec8-1ee7-4bb7-bc2b-09763a11aada"). InnerVolumeSpecName "kube-api-access-ljwsn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:06:46 crc kubenswrapper[4954]: I1128 17:06:46.726436 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ae51cec8-1ee7-4bb7-bc2b-09763a11aada" (UID: "ae51cec8-1ee7-4bb7-bc2b-09763a11aada"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:06:46 crc kubenswrapper[4954]: I1128 17:06:46.802229 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:06:46 crc kubenswrapper[4954]: I1128 17:06:46.802261 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ljwsn\" (UniqueName: \"kubernetes.io/projected/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-kube-api-access-ljwsn\") on node \"crc\" DevicePath \"\"" Nov 28 17:06:46 crc kubenswrapper[4954]: I1128 17:06:46.802273 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae51cec8-1ee7-4bb7-bc2b-09763a11aada-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:06:47 crc kubenswrapper[4954]: I1128 17:06:47.381581 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n2lg2" Nov 28 17:06:47 crc kubenswrapper[4954]: I1128 17:06:47.381922 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n2lg2" event={"ID":"ae51cec8-1ee7-4bb7-bc2b-09763a11aada","Type":"ContainerDied","Data":"6dd999537dc423e0548308ed188aa4f7d55085e3d6da31528bc0324173aab1d4"} Nov 28 17:06:47 crc kubenswrapper[4954]: I1128 17:06:47.381997 4954 scope.go:117] "RemoveContainer" containerID="70a2eae1934b41c3d4516e6c76a77e4aa553399624c925f7a3bd6c885791df47" Nov 28 17:06:47 crc kubenswrapper[4954]: I1128 17:06:47.422629 4954 scope.go:117] "RemoveContainer" containerID="883fde5c60726c48ca91ade0cea17b842a8fd013c82a67f28930ab8e34224d1e" Nov 28 17:06:47 crc kubenswrapper[4954]: I1128 17:06:47.445311 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n2lg2"] Nov 28 17:06:47 crc kubenswrapper[4954]: I1128 17:06:47.448191 4954 scope.go:117] "RemoveContainer" containerID="02b1de4b73a1192bb7c1d50f93b16b3cf7d70e5b59837d499c7254c4c0a57924" Nov 28 17:06:47 crc kubenswrapper[4954]: I1128 17:06:47.451157 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-n2lg2"] Nov 28 17:06:47 crc kubenswrapper[4954]: I1128 17:06:47.872590 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae51cec8-1ee7-4bb7-bc2b-09763a11aada" path="/var/lib/kubelet/pods/ae51cec8-1ee7-4bb7-bc2b-09763a11aada/volumes" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.175882 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5vhk9"] Nov 28 17:08:01 crc kubenswrapper[4954]: E1128 17:08:01.179503 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae51cec8-1ee7-4bb7-bc2b-09763a11aada" containerName="extract-content" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.179556 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae51cec8-1ee7-4bb7-bc2b-09763a11aada" containerName="extract-content" Nov 28 17:08:01 crc kubenswrapper[4954]: E1128 17:08:01.179584 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae51cec8-1ee7-4bb7-bc2b-09763a11aada" containerName="registry-server" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.179595 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae51cec8-1ee7-4bb7-bc2b-09763a11aada" containerName="registry-server" Nov 28 17:08:01 crc kubenswrapper[4954]: E1128 17:08:01.179610 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae51cec8-1ee7-4bb7-bc2b-09763a11aada" containerName="extract-utilities" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.179622 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae51cec8-1ee7-4bb7-bc2b-09763a11aada" containerName="extract-utilities" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.179875 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae51cec8-1ee7-4bb7-bc2b-09763a11aada" containerName="registry-server" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.182852 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.192697 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5vhk9"] Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.371577 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7b86622-8dfa-447d-a1b3-85203b56a0c4-catalog-content\") pod \"certified-operators-5vhk9\" (UID: \"f7b86622-8dfa-447d-a1b3-85203b56a0c4\") " pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.371847 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7b86622-8dfa-447d-a1b3-85203b56a0c4-utilities\") pod \"certified-operators-5vhk9\" (UID: \"f7b86622-8dfa-447d-a1b3-85203b56a0c4\") " pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.371913 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzghr\" (UniqueName: \"kubernetes.io/projected/f7b86622-8dfa-447d-a1b3-85203b56a0c4-kube-api-access-nzghr\") pod \"certified-operators-5vhk9\" (UID: \"f7b86622-8dfa-447d-a1b3-85203b56a0c4\") " pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.473643 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7b86622-8dfa-447d-a1b3-85203b56a0c4-utilities\") pod \"certified-operators-5vhk9\" (UID: \"f7b86622-8dfa-447d-a1b3-85203b56a0c4\") " pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.473687 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzghr\" (UniqueName: \"kubernetes.io/projected/f7b86622-8dfa-447d-a1b3-85203b56a0c4-kube-api-access-nzghr\") pod \"certified-operators-5vhk9\" (UID: \"f7b86622-8dfa-447d-a1b3-85203b56a0c4\") " pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.473740 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7b86622-8dfa-447d-a1b3-85203b56a0c4-catalog-content\") pod \"certified-operators-5vhk9\" (UID: \"f7b86622-8dfa-447d-a1b3-85203b56a0c4\") " pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.474199 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7b86622-8dfa-447d-a1b3-85203b56a0c4-catalog-content\") pod \"certified-operators-5vhk9\" (UID: \"f7b86622-8dfa-447d-a1b3-85203b56a0c4\") " pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.475145 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7b86622-8dfa-447d-a1b3-85203b56a0c4-utilities\") pod \"certified-operators-5vhk9\" (UID: \"f7b86622-8dfa-447d-a1b3-85203b56a0c4\") " pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.505843 4954 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-nzghr\" (UniqueName: \"kubernetes.io/projected/f7b86622-8dfa-447d-a1b3-85203b56a0c4-kube-api-access-nzghr\") pod \"certified-operators-5vhk9\" (UID: \"f7b86622-8dfa-447d-a1b3-85203b56a0c4\") " pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:01 crc kubenswrapper[4954]: I1128 17:08:01.510673 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:02 crc kubenswrapper[4954]: I1128 17:08:02.026519 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5vhk9"] Nov 28 17:08:02 crc kubenswrapper[4954]: I1128 17:08:02.982564 4954 generic.go:334] "Generic (PLEG): container finished" podID="f7b86622-8dfa-447d-a1b3-85203b56a0c4" containerID="6b5b61a95deba8ccd1c3dc08d8ef6dfb9b73bd0bd088cd6bedcdb93748a7ab94" exitCode=0 Nov 28 17:08:02 crc kubenswrapper[4954]: I1128 17:08:02.982652 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5vhk9" event={"ID":"f7b86622-8dfa-447d-a1b3-85203b56a0c4","Type":"ContainerDied","Data":"6b5b61a95deba8ccd1c3dc08d8ef6dfb9b73bd0bd088cd6bedcdb93748a7ab94"} Nov 28 17:08:02 crc kubenswrapper[4954]: I1128 17:08:02.983009 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5vhk9" event={"ID":"f7b86622-8dfa-447d-a1b3-85203b56a0c4","Type":"ContainerStarted","Data":"8ab0e1ca9b5ec07756f8c5d2b9ebf448a87c7828839f89fee6484f47c4cf12ab"} Nov 28 17:08:05 crc kubenswrapper[4954]: I1128 17:08:05.006725 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5vhk9" event={"ID":"f7b86622-8dfa-447d-a1b3-85203b56a0c4","Type":"ContainerStarted","Data":"b774d6ebd9c7179cdef2da6c7bc7e6db9908fcabc1a32e8d58e6f8b139babb44"} Nov 28 17:08:06 crc kubenswrapper[4954]: I1128 17:08:06.016895 4954 generic.go:334] "Generic (PLEG): container finished" podID="f7b86622-8dfa-447d-a1b3-85203b56a0c4" containerID="b774d6ebd9c7179cdef2da6c7bc7e6db9908fcabc1a32e8d58e6f8b139babb44" exitCode=0 Nov 28 17:08:06 crc kubenswrapper[4954]: I1128 17:08:06.016942 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5vhk9" event={"ID":"f7b86622-8dfa-447d-a1b3-85203b56a0c4","Type":"ContainerDied","Data":"b774d6ebd9c7179cdef2da6c7bc7e6db9908fcabc1a32e8d58e6f8b139babb44"} Nov 28 17:08:07 crc kubenswrapper[4954]: I1128 17:08:07.028519 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5vhk9" event={"ID":"f7b86622-8dfa-447d-a1b3-85203b56a0c4","Type":"ContainerStarted","Data":"8381921e3af88019ca56ae554c3357047863a3747d07c76d2b51b543737bb7e5"} Nov 28 17:08:07 crc kubenswrapper[4954]: I1128 17:08:07.060493 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5vhk9" podStartSLOduration=2.358291086 podStartE2EDuration="6.060459767s" podCreationTimestamp="2025-11-28 17:08:01 +0000 UTC" firstStartedPulling="2025-11-28 17:08:02.984689084 +0000 UTC m=+3436.376357645" lastFinishedPulling="2025-11-28 17:08:06.686857785 +0000 UTC m=+3440.078526326" observedRunningTime="2025-11-28 17:08:07.054818829 +0000 UTC m=+3440.446487380" watchObservedRunningTime="2025-11-28 17:08:07.060459767 +0000 UTC m=+3440.452128348" Nov 28 17:08:11 crc kubenswrapper[4954]: I1128 17:08:11.511664 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:11 crc kubenswrapper[4954]: I1128 17:08:11.514244 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:11 crc kubenswrapper[4954]: I1128 17:08:11.561392 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:12 crc kubenswrapper[4954]: I1128 17:08:12.136455 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:12 crc kubenswrapper[4954]: I1128 17:08:12.195925 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5vhk9"] Nov 28 17:08:14 crc kubenswrapper[4954]: I1128 17:08:14.084404 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5vhk9" podUID="f7b86622-8dfa-447d-a1b3-85203b56a0c4" containerName="registry-server" containerID="cri-o://8381921e3af88019ca56ae554c3357047863a3747d07c76d2b51b543737bb7e5" gracePeriod=2 Nov 28 17:08:14 crc kubenswrapper[4954]: I1128 17:08:14.964913 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:14 crc kubenswrapper[4954]: I1128 17:08:14.978125 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzghr\" (UniqueName: \"kubernetes.io/projected/f7b86622-8dfa-447d-a1b3-85203b56a0c4-kube-api-access-nzghr\") pod \"f7b86622-8dfa-447d-a1b3-85203b56a0c4\" (UID: \"f7b86622-8dfa-447d-a1b3-85203b56a0c4\") " Nov 28 17:08:14 crc kubenswrapper[4954]: I1128 17:08:14.978277 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7b86622-8dfa-447d-a1b3-85203b56a0c4-catalog-content\") pod \"f7b86622-8dfa-447d-a1b3-85203b56a0c4\" (UID: \"f7b86622-8dfa-447d-a1b3-85203b56a0c4\") " Nov 28 17:08:14 crc kubenswrapper[4954]: I1128 17:08:14.978307 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7b86622-8dfa-447d-a1b3-85203b56a0c4-utilities\") pod \"f7b86622-8dfa-447d-a1b3-85203b56a0c4\" (UID: \"f7b86622-8dfa-447d-a1b3-85203b56a0c4\") " Nov 28 17:08:14 crc kubenswrapper[4954]: I1128 17:08:14.979334 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7b86622-8dfa-447d-a1b3-85203b56a0c4-utilities" (OuterVolumeSpecName: "utilities") pod "f7b86622-8dfa-447d-a1b3-85203b56a0c4" (UID: "f7b86622-8dfa-447d-a1b3-85203b56a0c4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:08:14 crc kubenswrapper[4954]: I1128 17:08:14.984810 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7b86622-8dfa-447d-a1b3-85203b56a0c4-kube-api-access-nzghr" (OuterVolumeSpecName: "kube-api-access-nzghr") pod "f7b86622-8dfa-447d-a1b3-85203b56a0c4" (UID: "f7b86622-8dfa-447d-a1b3-85203b56a0c4"). InnerVolumeSpecName "kube-api-access-nzghr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.039327 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7b86622-8dfa-447d-a1b3-85203b56a0c4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f7b86622-8dfa-447d-a1b3-85203b56a0c4" (UID: "f7b86622-8dfa-447d-a1b3-85203b56a0c4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.080750 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7b86622-8dfa-447d-a1b3-85203b56a0c4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.080811 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7b86622-8dfa-447d-a1b3-85203b56a0c4-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.080831 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzghr\" (UniqueName: \"kubernetes.io/projected/f7b86622-8dfa-447d-a1b3-85203b56a0c4-kube-api-access-nzghr\") on node \"crc\" DevicePath \"\"" Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.096389 4954 generic.go:334] "Generic (PLEG): container finished" podID="f7b86622-8dfa-447d-a1b3-85203b56a0c4" containerID="8381921e3af88019ca56ae554c3357047863a3747d07c76d2b51b543737bb7e5" exitCode=0 Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.096441 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5vhk9" event={"ID":"f7b86622-8dfa-447d-a1b3-85203b56a0c4","Type":"ContainerDied","Data":"8381921e3af88019ca56ae554c3357047863a3747d07c76d2b51b543737bb7e5"} Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.096495 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5vhk9" Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.096518 4954 scope.go:117] "RemoveContainer" containerID="8381921e3af88019ca56ae554c3357047863a3747d07c76d2b51b543737bb7e5" Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.096501 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5vhk9" event={"ID":"f7b86622-8dfa-447d-a1b3-85203b56a0c4","Type":"ContainerDied","Data":"8ab0e1ca9b5ec07756f8c5d2b9ebf448a87c7828839f89fee6484f47c4cf12ab"} Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.121224 4954 scope.go:117] "RemoveContainer" containerID="b774d6ebd9c7179cdef2da6c7bc7e6db9908fcabc1a32e8d58e6f8b139babb44" Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.160130 4954 scope.go:117] "RemoveContainer" containerID="6b5b61a95deba8ccd1c3dc08d8ef6dfb9b73bd0bd088cd6bedcdb93748a7ab94" Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.167434 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5vhk9"] Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.180620 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5vhk9"] Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.190705 4954 scope.go:117] "RemoveContainer" containerID="8381921e3af88019ca56ae554c3357047863a3747d07c76d2b51b543737bb7e5" Nov 28 17:08:15 crc kubenswrapper[4954]: E1128 17:08:15.191228 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8381921e3af88019ca56ae554c3357047863a3747d07c76d2b51b543737bb7e5\": container with ID starting with 8381921e3af88019ca56ae554c3357047863a3747d07c76d2b51b543737bb7e5 not found: ID does not exist" containerID="8381921e3af88019ca56ae554c3357047863a3747d07c76d2b51b543737bb7e5" Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.191275 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8381921e3af88019ca56ae554c3357047863a3747d07c76d2b51b543737bb7e5"} err="failed to get container status \"8381921e3af88019ca56ae554c3357047863a3747d07c76d2b51b543737bb7e5\": rpc error: code = NotFound desc = could not find container \"8381921e3af88019ca56ae554c3357047863a3747d07c76d2b51b543737bb7e5\": container with ID starting with 8381921e3af88019ca56ae554c3357047863a3747d07c76d2b51b543737bb7e5 not found: ID does not exist" Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.191304 4954 scope.go:117] "RemoveContainer" containerID="b774d6ebd9c7179cdef2da6c7bc7e6db9908fcabc1a32e8d58e6f8b139babb44" Nov 28 17:08:15 crc kubenswrapper[4954]: E1128 17:08:15.191719 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b774d6ebd9c7179cdef2da6c7bc7e6db9908fcabc1a32e8d58e6f8b139babb44\": container with ID starting with b774d6ebd9c7179cdef2da6c7bc7e6db9908fcabc1a32e8d58e6f8b139babb44 not found: ID does not exist" containerID="b774d6ebd9c7179cdef2da6c7bc7e6db9908fcabc1a32e8d58e6f8b139babb44" Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.191773 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b774d6ebd9c7179cdef2da6c7bc7e6db9908fcabc1a32e8d58e6f8b139babb44"} err="failed to get container status \"b774d6ebd9c7179cdef2da6c7bc7e6db9908fcabc1a32e8d58e6f8b139babb44\": rpc error: code = NotFound desc = could not find 
container \"b774d6ebd9c7179cdef2da6c7bc7e6db9908fcabc1a32e8d58e6f8b139babb44\": container with ID starting with b774d6ebd9c7179cdef2da6c7bc7e6db9908fcabc1a32e8d58e6f8b139babb44 not found: ID does not exist" Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.191804 4954 scope.go:117] "RemoveContainer" containerID="6b5b61a95deba8ccd1c3dc08d8ef6dfb9b73bd0bd088cd6bedcdb93748a7ab94" Nov 28 17:08:15 crc kubenswrapper[4954]: E1128 17:08:15.193121 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b5b61a95deba8ccd1c3dc08d8ef6dfb9b73bd0bd088cd6bedcdb93748a7ab94\": container with ID starting with 6b5b61a95deba8ccd1c3dc08d8ef6dfb9b73bd0bd088cd6bedcdb93748a7ab94 not found: ID does not exist" containerID="6b5b61a95deba8ccd1c3dc08d8ef6dfb9b73bd0bd088cd6bedcdb93748a7ab94" Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.193159 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b5b61a95deba8ccd1c3dc08d8ef6dfb9b73bd0bd088cd6bedcdb93748a7ab94"} err="failed to get container status \"6b5b61a95deba8ccd1c3dc08d8ef6dfb9b73bd0bd088cd6bedcdb93748a7ab94\": rpc error: code = NotFound desc = could not find container \"6b5b61a95deba8ccd1c3dc08d8ef6dfb9b73bd0bd088cd6bedcdb93748a7ab94\": container with ID starting with 6b5b61a95deba8ccd1c3dc08d8ef6dfb9b73bd0bd088cd6bedcdb93748a7ab94 not found: ID does not exist" Nov 28 17:08:15 crc kubenswrapper[4954]: I1128 17:08:15.871888 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7b86622-8dfa-447d-a1b3-85203b56a0c4" path="/var/lib/kubelet/pods/f7b86622-8dfa-447d-a1b3-85203b56a0c4/volumes" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.139062 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gblz5"] Nov 28 17:08:48 crc kubenswrapper[4954]: E1128 17:08:48.140230 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b86622-8dfa-447d-a1b3-85203b56a0c4" containerName="extract-content" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.140248 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b86622-8dfa-447d-a1b3-85203b56a0c4" containerName="extract-content" Nov 28 17:08:48 crc kubenswrapper[4954]: E1128 17:08:48.140274 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b86622-8dfa-447d-a1b3-85203b56a0c4" containerName="extract-utilities" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.140281 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b86622-8dfa-447d-a1b3-85203b56a0c4" containerName="extract-utilities" Nov 28 17:08:48 crc kubenswrapper[4954]: E1128 17:08:48.140289 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b86622-8dfa-447d-a1b3-85203b56a0c4" containerName="registry-server" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.140296 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b86622-8dfa-447d-a1b3-85203b56a0c4" containerName="registry-server" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.140430 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7b86622-8dfa-447d-a1b3-85203b56a0c4" containerName="registry-server" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.141404 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.152762 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gblz5"] Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.204477 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d362f405-f672-41f7-9454-a7b2bcccb024-utilities\") pod \"redhat-operators-gblz5\" (UID: \"d362f405-f672-41f7-9454-a7b2bcccb024\") " pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.204722 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfkrn\" (UniqueName: \"kubernetes.io/projected/d362f405-f672-41f7-9454-a7b2bcccb024-kube-api-access-tfkrn\") pod \"redhat-operators-gblz5\" (UID: \"d362f405-f672-41f7-9454-a7b2bcccb024\") " pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.204901 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d362f405-f672-41f7-9454-a7b2bcccb024-catalog-content\") pod \"redhat-operators-gblz5\" (UID: \"d362f405-f672-41f7-9454-a7b2bcccb024\") " pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.305716 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d362f405-f672-41f7-9454-a7b2bcccb024-utilities\") pod \"redhat-operators-gblz5\" (UID: \"d362f405-f672-41f7-9454-a7b2bcccb024\") " pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.305795 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfkrn\" (UniqueName: \"kubernetes.io/projected/d362f405-f672-41f7-9454-a7b2bcccb024-kube-api-access-tfkrn\") pod \"redhat-operators-gblz5\" (UID: \"d362f405-f672-41f7-9454-a7b2bcccb024\") " pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.305832 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d362f405-f672-41f7-9454-a7b2bcccb024-catalog-content\") pod \"redhat-operators-gblz5\" (UID: \"d362f405-f672-41f7-9454-a7b2bcccb024\") " pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.306266 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d362f405-f672-41f7-9454-a7b2bcccb024-utilities\") pod \"redhat-operators-gblz5\" (UID: \"d362f405-f672-41f7-9454-a7b2bcccb024\") " pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.306293 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d362f405-f672-41f7-9454-a7b2bcccb024-catalog-content\") pod \"redhat-operators-gblz5\" (UID: \"d362f405-f672-41f7-9454-a7b2bcccb024\") " pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.331562 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-tfkrn\" (UniqueName: \"kubernetes.io/projected/d362f405-f672-41f7-9454-a7b2bcccb024-kube-api-access-tfkrn\") pod \"redhat-operators-gblz5\" (UID: \"d362f405-f672-41f7-9454-a7b2bcccb024\") " pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.508601 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:08:48 crc kubenswrapper[4954]: I1128 17:08:48.934562 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gblz5"] Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.129512 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-g4fk7"] Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.130904 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.144886 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g4fk7"] Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.215268 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9mm7\" (UniqueName: \"kubernetes.io/projected/c32dd912-4f52-4ca5-91c5-ae87cfacf761-kube-api-access-m9mm7\") pod \"community-operators-g4fk7\" (UID: \"c32dd912-4f52-4ca5-91c5-ae87cfacf761\") " pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.215311 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c32dd912-4f52-4ca5-91c5-ae87cfacf761-utilities\") pod \"community-operators-g4fk7\" (UID: \"c32dd912-4f52-4ca5-91c5-ae87cfacf761\") " pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.215346 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c32dd912-4f52-4ca5-91c5-ae87cfacf761-catalog-content\") pod \"community-operators-g4fk7\" (UID: \"c32dd912-4f52-4ca5-91c5-ae87cfacf761\") " pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.316711 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9mm7\" (UniqueName: \"kubernetes.io/projected/c32dd912-4f52-4ca5-91c5-ae87cfacf761-kube-api-access-m9mm7\") pod \"community-operators-g4fk7\" (UID: \"c32dd912-4f52-4ca5-91c5-ae87cfacf761\") " pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.316758 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c32dd912-4f52-4ca5-91c5-ae87cfacf761-utilities\") pod \"community-operators-g4fk7\" (UID: \"c32dd912-4f52-4ca5-91c5-ae87cfacf761\") " pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.316794 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c32dd912-4f52-4ca5-91c5-ae87cfacf761-catalog-content\") pod \"community-operators-g4fk7\" (UID: \"c32dd912-4f52-4ca5-91c5-ae87cfacf761\") " 
pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.317326 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c32dd912-4f52-4ca5-91c5-ae87cfacf761-catalog-content\") pod \"community-operators-g4fk7\" (UID: \"c32dd912-4f52-4ca5-91c5-ae87cfacf761\") " pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.317877 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c32dd912-4f52-4ca5-91c5-ae87cfacf761-utilities\") pod \"community-operators-g4fk7\" (UID: \"c32dd912-4f52-4ca5-91c5-ae87cfacf761\") " pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.336842 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9mm7\" (UniqueName: \"kubernetes.io/projected/c32dd912-4f52-4ca5-91c5-ae87cfacf761-kube-api-access-m9mm7\") pod \"community-operators-g4fk7\" (UID: \"c32dd912-4f52-4ca5-91c5-ae87cfacf761\") " pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.378539 4954 generic.go:334] "Generic (PLEG): container finished" podID="d362f405-f672-41f7-9454-a7b2bcccb024" containerID="8ae02278a4ff4a2361ca83cac34b2a5e75df0f8ee9a8fec8c1cc7ce18c230104" exitCode=0 Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.378599 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gblz5" event={"ID":"d362f405-f672-41f7-9454-a7b2bcccb024","Type":"ContainerDied","Data":"8ae02278a4ff4a2361ca83cac34b2a5e75df0f8ee9a8fec8c1cc7ce18c230104"} Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.378631 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gblz5" event={"ID":"d362f405-f672-41f7-9454-a7b2bcccb024","Type":"ContainerStarted","Data":"0b04324c5c60fb86b137f23a5b613bf50089bf6d610d19f36759f72fa5fbf014"} Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.485908 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:08:49 crc kubenswrapper[4954]: I1128 17:08:49.770361 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g4fk7"] Nov 28 17:08:49 crc kubenswrapper[4954]: W1128 17:08:49.775167 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc32dd912_4f52_4ca5_91c5_ae87cfacf761.slice/crio-cc119d09da6681006cbc805e3a09ec8ade92a504913064d1e7c4610a953550d9 WatchSource:0}: Error finding container cc119d09da6681006cbc805e3a09ec8ade92a504913064d1e7c4610a953550d9: Status 404 returned error can't find the container with id cc119d09da6681006cbc805e3a09ec8ade92a504913064d1e7c4610a953550d9 Nov 28 17:08:50 crc kubenswrapper[4954]: I1128 17:08:50.388413 4954 generic.go:334] "Generic (PLEG): container finished" podID="c32dd912-4f52-4ca5-91c5-ae87cfacf761" containerID="18709a95900d1776abfd546e79c06716305b774e24a28574de3db82f4aeb9a79" exitCode=0 Nov 28 17:08:50 crc kubenswrapper[4954]: I1128 17:08:50.388369 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g4fk7" event={"ID":"c32dd912-4f52-4ca5-91c5-ae87cfacf761","Type":"ContainerDied","Data":"18709a95900d1776abfd546e79c06716305b774e24a28574de3db82f4aeb9a79"} Nov 28 17:08:50 crc kubenswrapper[4954]: I1128 17:08:50.388865 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g4fk7" event={"ID":"c32dd912-4f52-4ca5-91c5-ae87cfacf761","Type":"ContainerStarted","Data":"cc119d09da6681006cbc805e3a09ec8ade92a504913064d1e7c4610a953550d9"} Nov 28 17:08:51 crc kubenswrapper[4954]: I1128 17:08:51.398240 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gblz5" event={"ID":"d362f405-f672-41f7-9454-a7b2bcccb024","Type":"ContainerStarted","Data":"a5ef1fe109784417e7cb0820036d36061f32321c62740b430f613bd08be9b462"} Nov 28 17:08:52 crc kubenswrapper[4954]: I1128 17:08:52.409463 4954 generic.go:334] "Generic (PLEG): container finished" podID="d362f405-f672-41f7-9454-a7b2bcccb024" containerID="a5ef1fe109784417e7cb0820036d36061f32321c62740b430f613bd08be9b462" exitCode=0 Nov 28 17:08:52 crc kubenswrapper[4954]: I1128 17:08:52.409515 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gblz5" event={"ID":"d362f405-f672-41f7-9454-a7b2bcccb024","Type":"ContainerDied","Data":"a5ef1fe109784417e7cb0820036d36061f32321c62740b430f613bd08be9b462"} Nov 28 17:08:53 crc kubenswrapper[4954]: E1128 17:08:53.123645 4954 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc32dd912_4f52_4ca5_91c5_ae87cfacf761.slice/crio-conmon-ccf8c1a6a568a029850ab4667e2b981018ba4c0499607fed6365439365bc64c6.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc32dd912_4f52_4ca5_91c5_ae87cfacf761.slice/crio-ccf8c1a6a568a029850ab4667e2b981018ba4c0499607fed6365439365bc64c6.scope\": RecentStats: unable to find data in memory cache]" Nov 28 17:08:53 crc kubenswrapper[4954]: I1128 17:08:53.418085 4954 generic.go:334] "Generic (PLEG): container finished" podID="c32dd912-4f52-4ca5-91c5-ae87cfacf761" containerID="ccf8c1a6a568a029850ab4667e2b981018ba4c0499607fed6365439365bc64c6" exitCode=0 Nov 28 17:08:53 crc kubenswrapper[4954]: I1128 
17:08:53.418127 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g4fk7" event={"ID":"c32dd912-4f52-4ca5-91c5-ae87cfacf761","Type":"ContainerDied","Data":"ccf8c1a6a568a029850ab4667e2b981018ba4c0499607fed6365439365bc64c6"} Nov 28 17:08:54 crc kubenswrapper[4954]: I1128 17:08:54.426363 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gblz5" event={"ID":"d362f405-f672-41f7-9454-a7b2bcccb024","Type":"ContainerStarted","Data":"8aaf6259939cf5eceee2b31a98fd0335671b2a0735f2a867c5362baa325a3119"} Nov 28 17:08:54 crc kubenswrapper[4954]: I1128 17:08:54.448067 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gblz5" podStartSLOduration=2.198294673 podStartE2EDuration="6.448048858s" podCreationTimestamp="2025-11-28 17:08:48 +0000 UTC" firstStartedPulling="2025-11-28 17:08:49.380276339 +0000 UTC m=+3482.771944870" lastFinishedPulling="2025-11-28 17:08:53.630030514 +0000 UTC m=+3487.021699055" observedRunningTime="2025-11-28 17:08:54.441729509 +0000 UTC m=+3487.833398070" watchObservedRunningTime="2025-11-28 17:08:54.448048858 +0000 UTC m=+3487.839717399" Nov 28 17:08:55 crc kubenswrapper[4954]: I1128 17:08:55.436385 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g4fk7" event={"ID":"c32dd912-4f52-4ca5-91c5-ae87cfacf761","Type":"ContainerStarted","Data":"9fd1830611b9ca624720e4dc3553ee4d41b13b5c2fd73b25d0bd361167ea2556"} Nov 28 17:08:55 crc kubenswrapper[4954]: I1128 17:08:55.455652 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-g4fk7" podStartSLOduration=2.719749772 podStartE2EDuration="6.455634929s" podCreationTimestamp="2025-11-28 17:08:49 +0000 UTC" firstStartedPulling="2025-11-28 17:08:50.391892585 +0000 UTC m=+3483.783561126" lastFinishedPulling="2025-11-28 17:08:54.127777742 +0000 UTC m=+3487.519446283" observedRunningTime="2025-11-28 17:08:55.45505909 +0000 UTC m=+3488.846727631" watchObservedRunningTime="2025-11-28 17:08:55.455634929 +0000 UTC m=+3488.847303470" Nov 28 17:08:58 crc kubenswrapper[4954]: I1128 17:08:58.508731 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:08:58 crc kubenswrapper[4954]: I1128 17:08:58.509288 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:08:59 crc kubenswrapper[4954]: I1128 17:08:59.486412 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:08:59 crc kubenswrapper[4954]: I1128 17:08:59.486447 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:08:59 crc kubenswrapper[4954]: I1128 17:08:59.528689 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:08:59 crc kubenswrapper[4954]: I1128 17:08:59.552101 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gblz5" podUID="d362f405-f672-41f7-9454-a7b2bcccb024" containerName="registry-server" probeResult="failure" output=< Nov 28 17:08:59 crc kubenswrapper[4954]: timeout: failed to connect service ":50051" within 1s Nov 28 17:08:59 crc kubenswrapper[4954]: > Nov 28 
17:09:00 crc kubenswrapper[4954]: I1128 17:09:00.522427 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:09:00 crc kubenswrapper[4954]: I1128 17:09:00.567628 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g4fk7"] Nov 28 17:09:02 crc kubenswrapper[4954]: I1128 17:09:02.482786 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:09:02 crc kubenswrapper[4954]: I1128 17:09:02.484121 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:09:02 crc kubenswrapper[4954]: I1128 17:09:02.494090 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-g4fk7" podUID="c32dd912-4f52-4ca5-91c5-ae87cfacf761" containerName="registry-server" containerID="cri-o://9fd1830611b9ca624720e4dc3553ee4d41b13b5c2fd73b25d0bd361167ea2556" gracePeriod=2 Nov 28 17:09:08 crc kubenswrapper[4954]: I1128 17:09:08.553879 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:09:08 crc kubenswrapper[4954]: I1128 17:09:08.597329 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:09:08 crc kubenswrapper[4954]: I1128 17:09:08.785478 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gblz5"] Nov 28 17:09:09 crc kubenswrapper[4954]: E1128 17:09:09.487299 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9fd1830611b9ca624720e4dc3553ee4d41b13b5c2fd73b25d0bd361167ea2556 is running failed: container process not found" containerID="9fd1830611b9ca624720e4dc3553ee4d41b13b5c2fd73b25d0bd361167ea2556" cmd=["grpc_health_probe","-addr=:50051"] Nov 28 17:09:09 crc kubenswrapper[4954]: E1128 17:09:09.487766 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9fd1830611b9ca624720e4dc3553ee4d41b13b5c2fd73b25d0bd361167ea2556 is running failed: container process not found" containerID="9fd1830611b9ca624720e4dc3553ee4d41b13b5c2fd73b25d0bd361167ea2556" cmd=["grpc_health_probe","-addr=:50051"] Nov 28 17:09:09 crc kubenswrapper[4954]: E1128 17:09:09.488139 4954 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9fd1830611b9ca624720e4dc3553ee4d41b13b5c2fd73b25d0bd361167ea2556 is running failed: container process not found" containerID="9fd1830611b9ca624720e4dc3553ee4d41b13b5c2fd73b25d0bd361167ea2556" cmd=["grpc_health_probe","-addr=:50051"] Nov 28 17:09:09 crc kubenswrapper[4954]: E1128 17:09:09.488229 4954 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking 
if PID of 9fd1830611b9ca624720e4dc3553ee4d41b13b5c2fd73b25d0bd361167ea2556 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-g4fk7" podUID="c32dd912-4f52-4ca5-91c5-ae87cfacf761" containerName="registry-server" Nov 28 17:09:09 crc kubenswrapper[4954]: I1128 17:09:09.854994 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:09:09 crc kubenswrapper[4954]: I1128 17:09:09.933779 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c32dd912-4f52-4ca5-91c5-ae87cfacf761-catalog-content\") pod \"c32dd912-4f52-4ca5-91c5-ae87cfacf761\" (UID: \"c32dd912-4f52-4ca5-91c5-ae87cfacf761\") " Nov 28 17:09:09 crc kubenswrapper[4954]: I1128 17:09:09.933874 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9mm7\" (UniqueName: \"kubernetes.io/projected/c32dd912-4f52-4ca5-91c5-ae87cfacf761-kube-api-access-m9mm7\") pod \"c32dd912-4f52-4ca5-91c5-ae87cfacf761\" (UID: \"c32dd912-4f52-4ca5-91c5-ae87cfacf761\") " Nov 28 17:09:09 crc kubenswrapper[4954]: I1128 17:09:09.933998 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c32dd912-4f52-4ca5-91c5-ae87cfacf761-utilities\") pod \"c32dd912-4f52-4ca5-91c5-ae87cfacf761\" (UID: \"c32dd912-4f52-4ca5-91c5-ae87cfacf761\") " Nov 28 17:09:09 crc kubenswrapper[4954]: I1128 17:09:09.935377 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c32dd912-4f52-4ca5-91c5-ae87cfacf761-utilities" (OuterVolumeSpecName: "utilities") pod "c32dd912-4f52-4ca5-91c5-ae87cfacf761" (UID: "c32dd912-4f52-4ca5-91c5-ae87cfacf761"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:09:09 crc kubenswrapper[4954]: I1128 17:09:09.939621 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c32dd912-4f52-4ca5-91c5-ae87cfacf761-kube-api-access-m9mm7" (OuterVolumeSpecName: "kube-api-access-m9mm7") pod "c32dd912-4f52-4ca5-91c5-ae87cfacf761" (UID: "c32dd912-4f52-4ca5-91c5-ae87cfacf761"). InnerVolumeSpecName "kube-api-access-m9mm7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:09:09 crc kubenswrapper[4954]: I1128 17:09:09.986026 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c32dd912-4f52-4ca5-91c5-ae87cfacf761-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c32dd912-4f52-4ca5-91c5-ae87cfacf761" (UID: "c32dd912-4f52-4ca5-91c5-ae87cfacf761"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:09:10 crc kubenswrapper[4954]: I1128 17:09:10.035656 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c32dd912-4f52-4ca5-91c5-ae87cfacf761-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:10 crc kubenswrapper[4954]: I1128 17:09:10.035690 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c32dd912-4f52-4ca5-91c5-ae87cfacf761-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:10 crc kubenswrapper[4954]: I1128 17:09:10.035706 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9mm7\" (UniqueName: \"kubernetes.io/projected/c32dd912-4f52-4ca5-91c5-ae87cfacf761-kube-api-access-m9mm7\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:10 crc kubenswrapper[4954]: I1128 17:09:10.261751 4954 generic.go:334] "Generic (PLEG): container finished" podID="c32dd912-4f52-4ca5-91c5-ae87cfacf761" containerID="9fd1830611b9ca624720e4dc3553ee4d41b13b5c2fd73b25d0bd361167ea2556" exitCode=0 Nov 28 17:09:10 crc kubenswrapper[4954]: I1128 17:09:10.261827 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g4fk7" event={"ID":"c32dd912-4f52-4ca5-91c5-ae87cfacf761","Type":"ContainerDied","Data":"9fd1830611b9ca624720e4dc3553ee4d41b13b5c2fd73b25d0bd361167ea2556"} Nov 28 17:09:10 crc kubenswrapper[4954]: I1128 17:09:10.261875 4954 scope.go:117] "RemoveContainer" containerID="9fd1830611b9ca624720e4dc3553ee4d41b13b5c2fd73b25d0bd361167ea2556" Nov 28 17:09:10 crc kubenswrapper[4954]: I1128 17:09:10.293406 4954 scope.go:117] "RemoveContainer" containerID="ccf8c1a6a568a029850ab4667e2b981018ba4c0499607fed6365439365bc64c6" Nov 28 17:09:10 crc kubenswrapper[4954]: I1128 17:09:10.327981 4954 scope.go:117] "RemoveContainer" containerID="18709a95900d1776abfd546e79c06716305b774e24a28574de3db82f4aeb9a79" Nov 28 17:09:11 crc kubenswrapper[4954]: I1128 17:09:11.274581 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g4fk7" event={"ID":"c32dd912-4f52-4ca5-91c5-ae87cfacf761","Type":"ContainerDied","Data":"cc119d09da6681006cbc805e3a09ec8ade92a504913064d1e7c4610a953550d9"} Nov 28 17:09:11 crc kubenswrapper[4954]: I1128 17:09:11.274605 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-g4fk7" Nov 28 17:09:11 crc kubenswrapper[4954]: I1128 17:09:11.316557 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g4fk7"] Nov 28 17:09:11 crc kubenswrapper[4954]: I1128 17:09:11.325831 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-g4fk7"] Nov 28 17:09:11 crc kubenswrapper[4954]: I1128 17:09:11.866188 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c32dd912-4f52-4ca5-91c5-ae87cfacf761" path="/var/lib/kubelet/pods/c32dd912-4f52-4ca5-91c5-ae87cfacf761/volumes" Nov 28 17:09:12 crc kubenswrapper[4954]: I1128 17:09:12.281071 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gblz5" podUID="d362f405-f672-41f7-9454-a7b2bcccb024" containerName="registry-server" containerID="cri-o://8aaf6259939cf5eceee2b31a98fd0335671b2a0735f2a867c5362baa325a3119" gracePeriod=2 Nov 28 17:09:12 crc kubenswrapper[4954]: I1128 17:09:12.658953 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:09:12 crc kubenswrapper[4954]: I1128 17:09:12.784235 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d362f405-f672-41f7-9454-a7b2bcccb024-utilities\") pod \"d362f405-f672-41f7-9454-a7b2bcccb024\" (UID: \"d362f405-f672-41f7-9454-a7b2bcccb024\") " Nov 28 17:09:12 crc kubenswrapper[4954]: I1128 17:09:12.784315 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d362f405-f672-41f7-9454-a7b2bcccb024-catalog-content\") pod \"d362f405-f672-41f7-9454-a7b2bcccb024\" (UID: \"d362f405-f672-41f7-9454-a7b2bcccb024\") " Nov 28 17:09:12 crc kubenswrapper[4954]: I1128 17:09:12.784366 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfkrn\" (UniqueName: \"kubernetes.io/projected/d362f405-f672-41f7-9454-a7b2bcccb024-kube-api-access-tfkrn\") pod \"d362f405-f672-41f7-9454-a7b2bcccb024\" (UID: \"d362f405-f672-41f7-9454-a7b2bcccb024\") " Nov 28 17:09:12 crc kubenswrapper[4954]: I1128 17:09:12.785567 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d362f405-f672-41f7-9454-a7b2bcccb024-utilities" (OuterVolumeSpecName: "utilities") pod "d362f405-f672-41f7-9454-a7b2bcccb024" (UID: "d362f405-f672-41f7-9454-a7b2bcccb024"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:09:12 crc kubenswrapper[4954]: I1128 17:09:12.791115 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d362f405-f672-41f7-9454-a7b2bcccb024-kube-api-access-tfkrn" (OuterVolumeSpecName: "kube-api-access-tfkrn") pod "d362f405-f672-41f7-9454-a7b2bcccb024" (UID: "d362f405-f672-41f7-9454-a7b2bcccb024"). InnerVolumeSpecName "kube-api-access-tfkrn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:09:12 crc kubenswrapper[4954]: I1128 17:09:12.887521 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tfkrn\" (UniqueName: \"kubernetes.io/projected/d362f405-f672-41f7-9454-a7b2bcccb024-kube-api-access-tfkrn\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:12 crc kubenswrapper[4954]: I1128 17:09:12.887566 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d362f405-f672-41f7-9454-a7b2bcccb024-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:12 crc kubenswrapper[4954]: I1128 17:09:12.906938 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d362f405-f672-41f7-9454-a7b2bcccb024-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d362f405-f672-41f7-9454-a7b2bcccb024" (UID: "d362f405-f672-41f7-9454-a7b2bcccb024"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:09:12 crc kubenswrapper[4954]: I1128 17:09:12.988797 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d362f405-f672-41f7-9454-a7b2bcccb024-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.289337 4954 generic.go:334] "Generic (PLEG): container finished" podID="d362f405-f672-41f7-9454-a7b2bcccb024" containerID="8aaf6259939cf5eceee2b31a98fd0335671b2a0735f2a867c5362baa325a3119" exitCode=0 Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.289428 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gblz5" Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.289426 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gblz5" event={"ID":"d362f405-f672-41f7-9454-a7b2bcccb024","Type":"ContainerDied","Data":"8aaf6259939cf5eceee2b31a98fd0335671b2a0735f2a867c5362baa325a3119"} Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.289772 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gblz5" event={"ID":"d362f405-f672-41f7-9454-a7b2bcccb024","Type":"ContainerDied","Data":"0b04324c5c60fb86b137f23a5b613bf50089bf6d610d19f36759f72fa5fbf014"} Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.289798 4954 scope.go:117] "RemoveContainer" containerID="8aaf6259939cf5eceee2b31a98fd0335671b2a0735f2a867c5362baa325a3119" Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.317831 4954 scope.go:117] "RemoveContainer" containerID="a5ef1fe109784417e7cb0820036d36061f32321c62740b430f613bd08be9b462" Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.322916 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gblz5"] Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.330925 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gblz5"] Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.334351 4954 scope.go:117] "RemoveContainer" containerID="8ae02278a4ff4a2361ca83cac34b2a5e75df0f8ee9a8fec8c1cc7ce18c230104" Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.358147 4954 scope.go:117] "RemoveContainer" containerID="8aaf6259939cf5eceee2b31a98fd0335671b2a0735f2a867c5362baa325a3119" Nov 28 17:09:13 crc kubenswrapper[4954]: E1128 17:09:13.358682 4954 log.go:32] "ContainerStatus 
from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8aaf6259939cf5eceee2b31a98fd0335671b2a0735f2a867c5362baa325a3119\": container with ID starting with 8aaf6259939cf5eceee2b31a98fd0335671b2a0735f2a867c5362baa325a3119 not found: ID does not exist" containerID="8aaf6259939cf5eceee2b31a98fd0335671b2a0735f2a867c5362baa325a3119" Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.358804 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8aaf6259939cf5eceee2b31a98fd0335671b2a0735f2a867c5362baa325a3119"} err="failed to get container status \"8aaf6259939cf5eceee2b31a98fd0335671b2a0735f2a867c5362baa325a3119\": rpc error: code = NotFound desc = could not find container \"8aaf6259939cf5eceee2b31a98fd0335671b2a0735f2a867c5362baa325a3119\": container with ID starting with 8aaf6259939cf5eceee2b31a98fd0335671b2a0735f2a867c5362baa325a3119 not found: ID does not exist" Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.358908 4954 scope.go:117] "RemoveContainer" containerID="a5ef1fe109784417e7cb0820036d36061f32321c62740b430f613bd08be9b462" Nov 28 17:09:13 crc kubenswrapper[4954]: E1128 17:09:13.359303 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5ef1fe109784417e7cb0820036d36061f32321c62740b430f613bd08be9b462\": container with ID starting with a5ef1fe109784417e7cb0820036d36061f32321c62740b430f613bd08be9b462 not found: ID does not exist" containerID="a5ef1fe109784417e7cb0820036d36061f32321c62740b430f613bd08be9b462" Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.359494 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5ef1fe109784417e7cb0820036d36061f32321c62740b430f613bd08be9b462"} err="failed to get container status \"a5ef1fe109784417e7cb0820036d36061f32321c62740b430f613bd08be9b462\": rpc error: code = NotFound desc = could not find container \"a5ef1fe109784417e7cb0820036d36061f32321c62740b430f613bd08be9b462\": container with ID starting with a5ef1fe109784417e7cb0820036d36061f32321c62740b430f613bd08be9b462 not found: ID does not exist" Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.359743 4954 scope.go:117] "RemoveContainer" containerID="8ae02278a4ff4a2361ca83cac34b2a5e75df0f8ee9a8fec8c1cc7ce18c230104" Nov 28 17:09:13 crc kubenswrapper[4954]: E1128 17:09:13.360212 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ae02278a4ff4a2361ca83cac34b2a5e75df0f8ee9a8fec8c1cc7ce18c230104\": container with ID starting with 8ae02278a4ff4a2361ca83cac34b2a5e75df0f8ee9a8fec8c1cc7ce18c230104 not found: ID does not exist" containerID="8ae02278a4ff4a2361ca83cac34b2a5e75df0f8ee9a8fec8c1cc7ce18c230104" Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.360330 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ae02278a4ff4a2361ca83cac34b2a5e75df0f8ee9a8fec8c1cc7ce18c230104"} err="failed to get container status \"8ae02278a4ff4a2361ca83cac34b2a5e75df0f8ee9a8fec8c1cc7ce18c230104\": rpc error: code = NotFound desc = could not find container \"8ae02278a4ff4a2361ca83cac34b2a5e75df0f8ee9a8fec8c1cc7ce18c230104\": container with ID starting with 8ae02278a4ff4a2361ca83cac34b2a5e75df0f8ee9a8fec8c1cc7ce18c230104 not found: ID does not exist" Nov 28 17:09:13 crc kubenswrapper[4954]: I1128 17:09:13.871833 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="d362f405-f672-41f7-9454-a7b2bcccb024" path="/var/lib/kubelet/pods/d362f405-f672-41f7-9454-a7b2bcccb024/volumes" Nov 28 17:09:32 crc kubenswrapper[4954]: I1128 17:09:32.481069 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:09:32 crc kubenswrapper[4954]: I1128 17:09:32.481863 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:10:02 crc kubenswrapper[4954]: I1128 17:10:02.480685 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:10:02 crc kubenswrapper[4954]: I1128 17:10:02.481352 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:10:02 crc kubenswrapper[4954]: I1128 17:10:02.481407 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 17:10:02 crc kubenswrapper[4954]: I1128 17:10:02.482963 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1a8bfeb8d720a6ac2f6290b0da83f4883622060108336058378d22153c6d04e4"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:10:02 crc kubenswrapper[4954]: I1128 17:10:02.483050 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://1a8bfeb8d720a6ac2f6290b0da83f4883622060108336058378d22153c6d04e4" gracePeriod=600 Nov 28 17:10:02 crc kubenswrapper[4954]: I1128 17:10:02.667350 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="1a8bfeb8d720a6ac2f6290b0da83f4883622060108336058378d22153c6d04e4" exitCode=0 Nov 28 17:10:02 crc kubenswrapper[4954]: I1128 17:10:02.667558 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"1a8bfeb8d720a6ac2f6290b0da83f4883622060108336058378d22153c6d04e4"} Nov 28 17:10:02 crc kubenswrapper[4954]: I1128 17:10:02.667839 4954 scope.go:117] "RemoveContainer" containerID="0e1bd9c267f084d1de64d8cfd6e35e9f8839e3713722346b1eb8396f733b11d7" Nov 28 17:10:03 crc kubenswrapper[4954]: I1128 17:10:03.677203 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645"} Nov 28 17:12:02 crc kubenswrapper[4954]: I1128 17:12:02.480571 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:12:02 crc kubenswrapper[4954]: I1128 17:12:02.481074 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:12:32 crc kubenswrapper[4954]: I1128 17:12:32.481339 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:12:32 crc kubenswrapper[4954]: I1128 17:12:32.481984 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:13:02 crc kubenswrapper[4954]: I1128 17:13:02.480501 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:13:02 crc kubenswrapper[4954]: I1128 17:13:02.481095 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:13:02 crc kubenswrapper[4954]: I1128 17:13:02.481136 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 17:13:02 crc kubenswrapper[4954]: I1128 17:13:02.481808 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:13:02 crc kubenswrapper[4954]: I1128 17:13:02.481881 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" gracePeriod=600 Nov 28 17:13:02 crc kubenswrapper[4954]: E1128 
17:13:02.607801 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:13:03 crc kubenswrapper[4954]: I1128 17:13:03.144717 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" exitCode=0 Nov 28 17:13:03 crc kubenswrapper[4954]: I1128 17:13:03.144763 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645"} Nov 28 17:13:03 crc kubenswrapper[4954]: I1128 17:13:03.144798 4954 scope.go:117] "RemoveContainer" containerID="1a8bfeb8d720a6ac2f6290b0da83f4883622060108336058378d22153c6d04e4" Nov 28 17:13:03 crc kubenswrapper[4954]: I1128 17:13:03.145448 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:13:03 crc kubenswrapper[4954]: E1128 17:13:03.145793 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:13:16 crc kubenswrapper[4954]: I1128 17:13:16.856024 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:13:16 crc kubenswrapper[4954]: E1128 17:13:16.856882 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:13:27 crc kubenswrapper[4954]: I1128 17:13:27.859989 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:13:27 crc kubenswrapper[4954]: E1128 17:13:27.861356 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:13:42 crc kubenswrapper[4954]: I1128 17:13:42.855672 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:13:42 crc kubenswrapper[4954]: E1128 17:13:42.856428 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:13:55 crc kubenswrapper[4954]: I1128 17:13:55.856940 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:13:55 crc kubenswrapper[4954]: E1128 17:13:55.857732 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:14:08 crc kubenswrapper[4954]: I1128 17:14:08.856169 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:14:08 crc kubenswrapper[4954]: E1128 17:14:08.856801 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:14:20 crc kubenswrapper[4954]: I1128 17:14:20.855942 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:14:20 crc kubenswrapper[4954]: E1128 17:14:20.856711 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:14:35 crc kubenswrapper[4954]: I1128 17:14:35.857048 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:14:35 crc kubenswrapper[4954]: E1128 17:14:35.859145 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:14:47 crc kubenswrapper[4954]: I1128 17:14:47.865275 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:14:47 crc kubenswrapper[4954]: E1128 17:14:47.866081 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.179826 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp"]
Nov 28 17:15:00 crc kubenswrapper[4954]: E1128 17:15:00.181999 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d362f405-f672-41f7-9454-a7b2bcccb024" containerName="registry-server"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.182083 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d362f405-f672-41f7-9454-a7b2bcccb024" containerName="registry-server"
Nov 28 17:15:00 crc kubenswrapper[4954]: E1128 17:15:00.182157 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d362f405-f672-41f7-9454-a7b2bcccb024" containerName="extract-utilities"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.182253 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d362f405-f672-41f7-9454-a7b2bcccb024" containerName="extract-utilities"
Nov 28 17:15:00 crc kubenswrapper[4954]: E1128 17:15:00.182353 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c32dd912-4f52-4ca5-91c5-ae87cfacf761" containerName="registry-server"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.182429 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="c32dd912-4f52-4ca5-91c5-ae87cfacf761" containerName="registry-server"
Nov 28 17:15:00 crc kubenswrapper[4954]: E1128 17:15:00.182499 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d362f405-f672-41f7-9454-a7b2bcccb024" containerName="extract-content"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.182583 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="d362f405-f672-41f7-9454-a7b2bcccb024" containerName="extract-content"
Nov 28 17:15:00 crc kubenswrapper[4954]: E1128 17:15:00.182653 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c32dd912-4f52-4ca5-91c5-ae87cfacf761" containerName="extract-content"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.183670 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="c32dd912-4f52-4ca5-91c5-ae87cfacf761" containerName="extract-content"
Nov 28 17:15:00 crc kubenswrapper[4954]: E1128 17:15:00.183713 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c32dd912-4f52-4ca5-91c5-ae87cfacf761" containerName="extract-utilities"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.183722 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="c32dd912-4f52-4ca5-91c5-ae87cfacf761" containerName="extract-utilities"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.185113 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="c32dd912-4f52-4ca5-91c5-ae87cfacf761" containerName="registry-server"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.185138 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="d362f405-f672-41f7-9454-a7b2bcccb024" containerName="registry-server"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.185624 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.187887 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.188207 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.190157 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp"]
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.286063 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-config-volume\") pod \"collect-profiles-29405835-4m5jp\" (UID: \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.286274 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pncwz\" (UniqueName: \"kubernetes.io/projected/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-kube-api-access-pncwz\") pod \"collect-profiles-29405835-4m5jp\" (UID: \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.286361 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-secret-volume\") pod \"collect-profiles-29405835-4m5jp\" (UID: \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.387917 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-config-volume\") pod \"collect-profiles-29405835-4m5jp\" (UID: \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.388308 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pncwz\" (UniqueName: \"kubernetes.io/projected/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-kube-api-access-pncwz\") pod \"collect-profiles-29405835-4m5jp\" (UID: \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.389645 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-secret-volume\") pod \"collect-profiles-29405835-4m5jp\" (UID: \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp"
Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.390919 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-config-volume\") pod \"collect-profiles-29405835-4m5jp\" (UID: \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp"
\"collect-profiles-29405835-4m5jp\" (UID: \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp" Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.396250 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-secret-volume\") pod \"collect-profiles-29405835-4m5jp\" (UID: \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp" Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.408164 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pncwz\" (UniqueName: \"kubernetes.io/projected/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-kube-api-access-pncwz\") pod \"collect-profiles-29405835-4m5jp\" (UID: \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp" Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.518356 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp" Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.855874 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:15:00 crc kubenswrapper[4954]: E1128 17:15:00.856201 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:15:00 crc kubenswrapper[4954]: I1128 17:15:00.971481 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp"] Nov 28 17:15:01 crc kubenswrapper[4954]: I1128 17:15:01.026992 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp" event={"ID":"c1380ef6-e3aa-4a43-96f3-c6729e7299e9","Type":"ContainerStarted","Data":"694714c2a40011d08c1dcd2c0e90cd932ed362fb3fd7c13c21434014444ac719"} Nov 28 17:15:02 crc kubenswrapper[4954]: I1128 17:15:02.043552 4954 generic.go:334] "Generic (PLEG): container finished" podID="c1380ef6-e3aa-4a43-96f3-c6729e7299e9" containerID="b8d3b3b327883a969c17d34a8f2dd14d0f0d6f60661983e4c9030a91ccf38a7a" exitCode=0 Nov 28 17:15:02 crc kubenswrapper[4954]: I1128 17:15:02.043703 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp" event={"ID":"c1380ef6-e3aa-4a43-96f3-c6729e7299e9","Type":"ContainerDied","Data":"b8d3b3b327883a969c17d34a8f2dd14d0f0d6f60661983e4c9030a91ccf38a7a"} Nov 28 17:15:03 crc kubenswrapper[4954]: I1128 17:15:03.332068 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp" Nov 28 17:15:03 crc kubenswrapper[4954]: I1128 17:15:03.435477 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pncwz\" (UniqueName: \"kubernetes.io/projected/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-kube-api-access-pncwz\") pod \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\" (UID: \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\") " Nov 28 17:15:03 crc kubenswrapper[4954]: I1128 17:15:03.435891 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-config-volume\") pod \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\" (UID: \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\") " Nov 28 17:15:03 crc kubenswrapper[4954]: I1128 17:15:03.435926 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-secret-volume\") pod \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\" (UID: \"c1380ef6-e3aa-4a43-96f3-c6729e7299e9\") " Nov 28 17:15:03 crc kubenswrapper[4954]: I1128 17:15:03.436651 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-config-volume" (OuterVolumeSpecName: "config-volume") pod "c1380ef6-e3aa-4a43-96f3-c6729e7299e9" (UID: "c1380ef6-e3aa-4a43-96f3-c6729e7299e9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:15:03 crc kubenswrapper[4954]: I1128 17:15:03.442002 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "c1380ef6-e3aa-4a43-96f3-c6729e7299e9" (UID: "c1380ef6-e3aa-4a43-96f3-c6729e7299e9"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:15:03 crc kubenswrapper[4954]: I1128 17:15:03.442267 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-kube-api-access-pncwz" (OuterVolumeSpecName: "kube-api-access-pncwz") pod "c1380ef6-e3aa-4a43-96f3-c6729e7299e9" (UID: "c1380ef6-e3aa-4a43-96f3-c6729e7299e9"). InnerVolumeSpecName "kube-api-access-pncwz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:15:03 crc kubenswrapper[4954]: I1128 17:15:03.538141 4954 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:15:03 crc kubenswrapper[4954]: I1128 17:15:03.538188 4954 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:15:03 crc kubenswrapper[4954]: I1128 17:15:03.538208 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pncwz\" (UniqueName: \"kubernetes.io/projected/c1380ef6-e3aa-4a43-96f3-c6729e7299e9-kube-api-access-pncwz\") on node \"crc\" DevicePath \"\"" Nov 28 17:15:04 crc kubenswrapper[4954]: I1128 17:15:04.063713 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp" event={"ID":"c1380ef6-e3aa-4a43-96f3-c6729e7299e9","Type":"ContainerDied","Data":"694714c2a40011d08c1dcd2c0e90cd932ed362fb3fd7c13c21434014444ac719"} Nov 28 17:15:04 crc kubenswrapper[4954]: I1128 17:15:04.063780 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="694714c2a40011d08c1dcd2c0e90cd932ed362fb3fd7c13c21434014444ac719" Nov 28 17:15:04 crc kubenswrapper[4954]: I1128 17:15:04.063821 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-4m5jp" Nov 28 17:15:04 crc kubenswrapper[4954]: I1128 17:15:04.411882 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq"] Nov 28 17:15:04 crc kubenswrapper[4954]: I1128 17:15:04.416381 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-hrdjq"] Nov 28 17:15:05 crc kubenswrapper[4954]: I1128 17:15:05.865915 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e80c711-3bc5-4e23-baf4-58a5e12a287d" path="/var/lib/kubelet/pods/4e80c711-3bc5-4e23-baf4-58a5e12a287d/volumes" Nov 28 17:15:15 crc kubenswrapper[4954]: I1128 17:15:15.857305 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:15:15 crc kubenswrapper[4954]: E1128 17:15:15.858088 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:15:28 crc kubenswrapper[4954]: I1128 17:15:28.856484 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:15:28 crc kubenswrapper[4954]: E1128 17:15:28.857234 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:15:40 crc kubenswrapper[4954]: I1128 17:15:40.855624 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:15:40 crc kubenswrapper[4954]: E1128 17:15:40.856380 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:15:53 crc kubenswrapper[4954]: I1128 17:15:53.855942 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:15:53 crc kubenswrapper[4954]: E1128 17:15:53.856591 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:16:05 crc kubenswrapper[4954]: I1128 17:16:05.861914 4954 scope.go:117] "RemoveContainer" containerID="7c9a2fe6844965d9f856d01f84e3192c4b05d855025c2ba5cf7660c022bbbf78" Nov 28 17:16:07 crc kubenswrapper[4954]: I1128 17:16:07.863560 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:16:07 crc kubenswrapper[4954]: E1128 17:16:07.864653 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:16:22 crc kubenswrapper[4954]: I1128 17:16:22.856324 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:16:22 crc kubenswrapper[4954]: E1128 17:16:22.857175 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:16:34 crc kubenswrapper[4954]: I1128 17:16:34.856628 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:16:34 crc kubenswrapper[4954]: E1128 17:16:34.857394 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:16:47 crc kubenswrapper[4954]: I1128 17:16:47.860394 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:16:47 crc kubenswrapper[4954]: E1128 17:16:47.861141 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:17:02 crc kubenswrapper[4954]: I1128 17:17:02.855747 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:17:02 crc kubenswrapper[4954]: E1128 17:17:02.856211 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:17:14 crc kubenswrapper[4954]: I1128 17:17:14.855837 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:17:14 crc kubenswrapper[4954]: E1128 17:17:14.856640 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.179269 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-cnr2n"] Nov 28 17:17:21 crc kubenswrapper[4954]: E1128 17:17:21.180825 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1380ef6-e3aa-4a43-96f3-c6729e7299e9" containerName="collect-profiles" Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.180846 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1380ef6-e3aa-4a43-96f3-c6729e7299e9" containerName="collect-profiles" Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.181055 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1380ef6-e3aa-4a43-96f3-c6729e7299e9" containerName="collect-profiles" Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.182255 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.191793 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cnr2n"] Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.316124 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r526s\" (UniqueName: \"kubernetes.io/projected/bb5154b3-657f-4e42-a0db-8c06ab5b5185-kube-api-access-r526s\") pod \"redhat-marketplace-cnr2n\" (UID: \"bb5154b3-657f-4e42-a0db-8c06ab5b5185\") " pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.316211 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb5154b3-657f-4e42-a0db-8c06ab5b5185-utilities\") pod \"redhat-marketplace-cnr2n\" (UID: \"bb5154b3-657f-4e42-a0db-8c06ab5b5185\") " pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.316240 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb5154b3-657f-4e42-a0db-8c06ab5b5185-catalog-content\") pod \"redhat-marketplace-cnr2n\" (UID: \"bb5154b3-657f-4e42-a0db-8c06ab5b5185\") " pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.417403 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb5154b3-657f-4e42-a0db-8c06ab5b5185-catalog-content\") pod \"redhat-marketplace-cnr2n\" (UID: \"bb5154b3-657f-4e42-a0db-8c06ab5b5185\") " pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.417483 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r526s\" (UniqueName: \"kubernetes.io/projected/bb5154b3-657f-4e42-a0db-8c06ab5b5185-kube-api-access-r526s\") pod \"redhat-marketplace-cnr2n\" (UID: \"bb5154b3-657f-4e42-a0db-8c06ab5b5185\") " pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.417552 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb5154b3-657f-4e42-a0db-8c06ab5b5185-utilities\") pod \"redhat-marketplace-cnr2n\" (UID: \"bb5154b3-657f-4e42-a0db-8c06ab5b5185\") " pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.417986 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb5154b3-657f-4e42-a0db-8c06ab5b5185-utilities\") pod \"redhat-marketplace-cnr2n\" (UID: \"bb5154b3-657f-4e42-a0db-8c06ab5b5185\") " pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.418161 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb5154b3-657f-4e42-a0db-8c06ab5b5185-catalog-content\") pod \"redhat-marketplace-cnr2n\" (UID: \"bb5154b3-657f-4e42-a0db-8c06ab5b5185\") " pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.436813 4954 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-r526s\" (UniqueName: \"kubernetes.io/projected/bb5154b3-657f-4e42-a0db-8c06ab5b5185-kube-api-access-r526s\") pod \"redhat-marketplace-cnr2n\" (UID: \"bb5154b3-657f-4e42-a0db-8c06ab5b5185\") " pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.503686 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:21 crc kubenswrapper[4954]: I1128 17:17:21.926822 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cnr2n"] Nov 28 17:17:21 crc kubenswrapper[4954]: W1128 17:17:21.935919 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbb5154b3_657f_4e42_a0db_8c06ab5b5185.slice/crio-6375dfd1146b382d0bacd5af08336d4ff171604d0a58fd98c815eed9c9a7b61b WatchSource:0}: Error finding container 6375dfd1146b382d0bacd5af08336d4ff171604d0a58fd98c815eed9c9a7b61b: Status 404 returned error can't find the container with id 6375dfd1146b382d0bacd5af08336d4ff171604d0a58fd98c815eed9c9a7b61b Nov 28 17:17:22 crc kubenswrapper[4954]: I1128 17:17:22.062168 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cnr2n" event={"ID":"bb5154b3-657f-4e42-a0db-8c06ab5b5185","Type":"ContainerStarted","Data":"6375dfd1146b382d0bacd5af08336d4ff171604d0a58fd98c815eed9c9a7b61b"} Nov 28 17:17:23 crc kubenswrapper[4954]: I1128 17:17:23.072323 4954 generic.go:334] "Generic (PLEG): container finished" podID="bb5154b3-657f-4e42-a0db-8c06ab5b5185" containerID="a176b246d620b4817916731115fc503bbe00850f20a09168d69f049b3ce0c4b3" exitCode=0 Nov 28 17:17:23 crc kubenswrapper[4954]: I1128 17:17:23.072382 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cnr2n" event={"ID":"bb5154b3-657f-4e42-a0db-8c06ab5b5185","Type":"ContainerDied","Data":"a176b246d620b4817916731115fc503bbe00850f20a09168d69f049b3ce0c4b3"} Nov 28 17:17:23 crc kubenswrapper[4954]: I1128 17:17:23.075054 4954 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:17:25 crc kubenswrapper[4954]: I1128 17:17:25.090178 4954 generic.go:334] "Generic (PLEG): container finished" podID="bb5154b3-657f-4e42-a0db-8c06ab5b5185" containerID="78e2764041012a23f8c667969ec9acb215df26135b50d1de607654565f986255" exitCode=0 Nov 28 17:17:25 crc kubenswrapper[4954]: I1128 17:17:25.090261 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cnr2n" event={"ID":"bb5154b3-657f-4e42-a0db-8c06ab5b5185","Type":"ContainerDied","Data":"78e2764041012a23f8c667969ec9acb215df26135b50d1de607654565f986255"} Nov 28 17:17:26 crc kubenswrapper[4954]: I1128 17:17:26.098650 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cnr2n" event={"ID":"bb5154b3-657f-4e42-a0db-8c06ab5b5185","Type":"ContainerStarted","Data":"67a41d1d5a2542f4489e8ba89aaf22c9e1abddae5c042bf88bf2991b7875a7f5"} Nov 28 17:17:26 crc kubenswrapper[4954]: I1128 17:17:26.115614 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-cnr2n" podStartSLOduration=2.521054878 podStartE2EDuration="5.115592046s" podCreationTimestamp="2025-11-28 17:17:21 +0000 UTC" firstStartedPulling="2025-11-28 17:17:23.074753325 +0000 UTC m=+3996.466421866" 
lastFinishedPulling="2025-11-28 17:17:25.669290473 +0000 UTC m=+3999.060959034" observedRunningTime="2025-11-28 17:17:26.113458629 +0000 UTC m=+3999.505127180" watchObservedRunningTime="2025-11-28 17:17:26.115592046 +0000 UTC m=+3999.507260587" Nov 28 17:17:27 crc kubenswrapper[4954]: I1128 17:17:27.880457 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:17:27 crc kubenswrapper[4954]: E1128 17:17:27.881012 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:17:31 crc kubenswrapper[4954]: I1128 17:17:31.504883 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:31 crc kubenswrapper[4954]: I1128 17:17:31.505178 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:31 crc kubenswrapper[4954]: I1128 17:17:31.544423 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:32 crc kubenswrapper[4954]: I1128 17:17:32.204676 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:32 crc kubenswrapper[4954]: I1128 17:17:32.300963 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cnr2n"] Nov 28 17:17:34 crc kubenswrapper[4954]: I1128 17:17:34.173160 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-cnr2n" podUID="bb5154b3-657f-4e42-a0db-8c06ab5b5185" containerName="registry-server" containerID="cri-o://67a41d1d5a2542f4489e8ba89aaf22c9e1abddae5c042bf88bf2991b7875a7f5" gracePeriod=2 Nov 28 17:17:35 crc kubenswrapper[4954]: I1128 17:17:35.185120 4954 generic.go:334] "Generic (PLEG): container finished" podID="bb5154b3-657f-4e42-a0db-8c06ab5b5185" containerID="67a41d1d5a2542f4489e8ba89aaf22c9e1abddae5c042bf88bf2991b7875a7f5" exitCode=0 Nov 28 17:17:35 crc kubenswrapper[4954]: I1128 17:17:35.185693 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cnr2n" event={"ID":"bb5154b3-657f-4e42-a0db-8c06ab5b5185","Type":"ContainerDied","Data":"67a41d1d5a2542f4489e8ba89aaf22c9e1abddae5c042bf88bf2991b7875a7f5"} Nov 28 17:17:35 crc kubenswrapper[4954]: I1128 17:17:35.475075 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:35 crc kubenswrapper[4954]: I1128 17:17:35.649700 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r526s\" (UniqueName: \"kubernetes.io/projected/bb5154b3-657f-4e42-a0db-8c06ab5b5185-kube-api-access-r526s\") pod \"bb5154b3-657f-4e42-a0db-8c06ab5b5185\" (UID: \"bb5154b3-657f-4e42-a0db-8c06ab5b5185\") " Nov 28 17:17:35 crc kubenswrapper[4954]: I1128 17:17:35.649790 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb5154b3-657f-4e42-a0db-8c06ab5b5185-utilities\") pod \"bb5154b3-657f-4e42-a0db-8c06ab5b5185\" (UID: \"bb5154b3-657f-4e42-a0db-8c06ab5b5185\") " Nov 28 17:17:35 crc kubenswrapper[4954]: I1128 17:17:35.649855 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb5154b3-657f-4e42-a0db-8c06ab5b5185-catalog-content\") pod \"bb5154b3-657f-4e42-a0db-8c06ab5b5185\" (UID: \"bb5154b3-657f-4e42-a0db-8c06ab5b5185\") " Nov 28 17:17:35 crc kubenswrapper[4954]: I1128 17:17:35.651077 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb5154b3-657f-4e42-a0db-8c06ab5b5185-utilities" (OuterVolumeSpecName: "utilities") pod "bb5154b3-657f-4e42-a0db-8c06ab5b5185" (UID: "bb5154b3-657f-4e42-a0db-8c06ab5b5185"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:17:35 crc kubenswrapper[4954]: I1128 17:17:35.657698 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb5154b3-657f-4e42-a0db-8c06ab5b5185-kube-api-access-r526s" (OuterVolumeSpecName: "kube-api-access-r526s") pod "bb5154b3-657f-4e42-a0db-8c06ab5b5185" (UID: "bb5154b3-657f-4e42-a0db-8c06ab5b5185"). InnerVolumeSpecName "kube-api-access-r526s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:17:35 crc kubenswrapper[4954]: I1128 17:17:35.693055 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb5154b3-657f-4e42-a0db-8c06ab5b5185-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bb5154b3-657f-4e42-a0db-8c06ab5b5185" (UID: "bb5154b3-657f-4e42-a0db-8c06ab5b5185"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:17:35 crc kubenswrapper[4954]: I1128 17:17:35.751626 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r526s\" (UniqueName: \"kubernetes.io/projected/bb5154b3-657f-4e42-a0db-8c06ab5b5185-kube-api-access-r526s\") on node \"crc\" DevicePath \"\"" Nov 28 17:17:35 crc kubenswrapper[4954]: I1128 17:17:35.751662 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb5154b3-657f-4e42-a0db-8c06ab5b5185-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:17:35 crc kubenswrapper[4954]: I1128 17:17:35.751673 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb5154b3-657f-4e42-a0db-8c06ab5b5185-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:17:36 crc kubenswrapper[4954]: I1128 17:17:36.198121 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cnr2n" event={"ID":"bb5154b3-657f-4e42-a0db-8c06ab5b5185","Type":"ContainerDied","Data":"6375dfd1146b382d0bacd5af08336d4ff171604d0a58fd98c815eed9c9a7b61b"} Nov 28 17:17:36 crc kubenswrapper[4954]: I1128 17:17:36.198521 4954 scope.go:117] "RemoveContainer" containerID="67a41d1d5a2542f4489e8ba89aaf22c9e1abddae5c042bf88bf2991b7875a7f5" Nov 28 17:17:36 crc kubenswrapper[4954]: I1128 17:17:36.198167 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cnr2n" Nov 28 17:17:36 crc kubenswrapper[4954]: I1128 17:17:36.222005 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cnr2n"] Nov 28 17:17:36 crc kubenswrapper[4954]: I1128 17:17:36.226133 4954 scope.go:117] "RemoveContainer" containerID="78e2764041012a23f8c667969ec9acb215df26135b50d1de607654565f986255" Nov 28 17:17:36 crc kubenswrapper[4954]: I1128 17:17:36.227684 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-cnr2n"] Nov 28 17:17:36 crc kubenswrapper[4954]: I1128 17:17:36.243193 4954 scope.go:117] "RemoveContainer" containerID="a176b246d620b4817916731115fc503bbe00850f20a09168d69f049b3ce0c4b3" Nov 28 17:17:37 crc kubenswrapper[4954]: I1128 17:17:37.866079 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb5154b3-657f-4e42-a0db-8c06ab5b5185" path="/var/lib/kubelet/pods/bb5154b3-657f-4e42-a0db-8c06ab5b5185/volumes" Nov 28 17:17:42 crc kubenswrapper[4954]: I1128 17:17:42.857048 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:17:42 crc kubenswrapper[4954]: E1128 17:17:42.857901 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:17:57 crc kubenswrapper[4954]: I1128 17:17:57.860094 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:17:57 crc kubenswrapper[4954]: E1128 17:17:57.860968 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:18:11 crc kubenswrapper[4954]: I1128 17:18:11.856264 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:18:12 crc kubenswrapper[4954]: I1128 17:18:12.465247 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"616577e95c358da6750ba9f8047fb77e533eba34e29e838e43bd81b354deaca8"} Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.602907 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j68wt"] Nov 28 17:19:08 crc kubenswrapper[4954]: E1128 17:19:08.603912 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb5154b3-657f-4e42-a0db-8c06ab5b5185" containerName="registry-server" Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.603930 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb5154b3-657f-4e42-a0db-8c06ab5b5185" containerName="registry-server" Nov 28 17:19:08 crc kubenswrapper[4954]: E1128 17:19:08.603956 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb5154b3-657f-4e42-a0db-8c06ab5b5185" containerName="extract-utilities" Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.603963 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb5154b3-657f-4e42-a0db-8c06ab5b5185" containerName="extract-utilities" Nov 28 17:19:08 crc kubenswrapper[4954]: E1128 17:19:08.603977 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb5154b3-657f-4e42-a0db-8c06ab5b5185" containerName="extract-content" Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.603985 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb5154b3-657f-4e42-a0db-8c06ab5b5185" containerName="extract-content" Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.604146 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb5154b3-657f-4e42-a0db-8c06ab5b5185" containerName="registry-server" Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.608740 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j68wt" Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.621249 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j68wt"] Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.772297 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/746b782a-972a-48ef-b07c-6dedfba8ea94-utilities\") pod \"certified-operators-j68wt\" (UID: \"746b782a-972a-48ef-b07c-6dedfba8ea94\") " pod="openshift-marketplace/certified-operators-j68wt" Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.772381 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/746b782a-972a-48ef-b07c-6dedfba8ea94-catalog-content\") pod \"certified-operators-j68wt\" (UID: \"746b782a-972a-48ef-b07c-6dedfba8ea94\") " pod="openshift-marketplace/certified-operators-j68wt" Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.772880 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9q2hm\" (UniqueName: \"kubernetes.io/projected/746b782a-972a-48ef-b07c-6dedfba8ea94-kube-api-access-9q2hm\") pod \"certified-operators-j68wt\" (UID: \"746b782a-972a-48ef-b07c-6dedfba8ea94\") " pod="openshift-marketplace/certified-operators-j68wt" Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.875164 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/746b782a-972a-48ef-b07c-6dedfba8ea94-utilities\") pod \"certified-operators-j68wt\" (UID: \"746b782a-972a-48ef-b07c-6dedfba8ea94\") " pod="openshift-marketplace/certified-operators-j68wt" Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.875233 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/746b782a-972a-48ef-b07c-6dedfba8ea94-catalog-content\") pod \"certified-operators-j68wt\" (UID: \"746b782a-972a-48ef-b07c-6dedfba8ea94\") " pod="openshift-marketplace/certified-operators-j68wt" Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.875290 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9q2hm\" (UniqueName: \"kubernetes.io/projected/746b782a-972a-48ef-b07c-6dedfba8ea94-kube-api-access-9q2hm\") pod \"certified-operators-j68wt\" (UID: \"746b782a-972a-48ef-b07c-6dedfba8ea94\") " pod="openshift-marketplace/certified-operators-j68wt" Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.875900 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/746b782a-972a-48ef-b07c-6dedfba8ea94-utilities\") pod \"certified-operators-j68wt\" (UID: \"746b782a-972a-48ef-b07c-6dedfba8ea94\") " pod="openshift-marketplace/certified-operators-j68wt" Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.876369 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/746b782a-972a-48ef-b07c-6dedfba8ea94-catalog-content\") pod \"certified-operators-j68wt\" (UID: \"746b782a-972a-48ef-b07c-6dedfba8ea94\") " pod="openshift-marketplace/certified-operators-j68wt" Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.907929 4954 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9q2hm\" (UniqueName: \"kubernetes.io/projected/746b782a-972a-48ef-b07c-6dedfba8ea94-kube-api-access-9q2hm\") pod \"certified-operators-j68wt\" (UID: \"746b782a-972a-48ef-b07c-6dedfba8ea94\") " pod="openshift-marketplace/certified-operators-j68wt" Nov 28 17:19:08 crc kubenswrapper[4954]: I1128 17:19:08.954066 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j68wt" Nov 28 17:19:09 crc kubenswrapper[4954]: I1128 17:19:09.239122 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j68wt"] Nov 28 17:19:09 crc kubenswrapper[4954]: I1128 17:19:09.942393 4954 generic.go:334] "Generic (PLEG): container finished" podID="746b782a-972a-48ef-b07c-6dedfba8ea94" containerID="d00688cf327390e2cbbd8432d2202ba51feb62a1f108ffcafc695414b4a1519a" exitCode=0 Nov 28 17:19:09 crc kubenswrapper[4954]: I1128 17:19:09.942503 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j68wt" event={"ID":"746b782a-972a-48ef-b07c-6dedfba8ea94","Type":"ContainerDied","Data":"d00688cf327390e2cbbd8432d2202ba51feb62a1f108ffcafc695414b4a1519a"} Nov 28 17:19:09 crc kubenswrapper[4954]: I1128 17:19:09.942783 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j68wt" event={"ID":"746b782a-972a-48ef-b07c-6dedfba8ea94","Type":"ContainerStarted","Data":"5bfc023200e684932d1d436e609a788e835c5bea4f57ee44a1c6d7ade87a26e5"} Nov 28 17:19:13 crc kubenswrapper[4954]: I1128 17:19:13.971155 4954 generic.go:334] "Generic (PLEG): container finished" podID="746b782a-972a-48ef-b07c-6dedfba8ea94" containerID="58f42c6508cc9f7f3a26617725257be1b0a2c7364eb72c2e1986d677f8071050" exitCode=0 Nov 28 17:19:13 crc kubenswrapper[4954]: I1128 17:19:13.971217 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j68wt" event={"ID":"746b782a-972a-48ef-b07c-6dedfba8ea94","Type":"ContainerDied","Data":"58f42c6508cc9f7f3a26617725257be1b0a2c7364eb72c2e1986d677f8071050"} Nov 28 17:19:14 crc kubenswrapper[4954]: I1128 17:19:14.980061 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j68wt" event={"ID":"746b782a-972a-48ef-b07c-6dedfba8ea94","Type":"ContainerStarted","Data":"9a202e64d2df3bdd4d66a02ae47473aeb421266ee78fd6cd56f05f675e1b12da"} Nov 28 17:19:18 crc kubenswrapper[4954]: I1128 17:19:18.954382 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-j68wt" Nov 28 17:19:18 crc kubenswrapper[4954]: I1128 17:19:18.954978 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j68wt" Nov 28 17:19:18 crc kubenswrapper[4954]: I1128 17:19:18.999159 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j68wt" Nov 28 17:19:19 crc kubenswrapper[4954]: I1128 17:19:19.048329 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j68wt" Nov 28 17:19:19 crc kubenswrapper[4954]: I1128 17:19:19.048673 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j68wt" podStartSLOduration=6.401442983 podStartE2EDuration="11.048644989s" podCreationTimestamp="2025-11-28 17:19:08 +0000 UTC" 
firstStartedPulling="2025-11-28 17:19:09.944405996 +0000 UTC m=+4103.336074527" lastFinishedPulling="2025-11-28 17:19:14.591607992 +0000 UTC m=+4107.983276533" observedRunningTime="2025-11-28 17:19:15.001787375 +0000 UTC m=+4108.393455926" watchObservedRunningTime="2025-11-28 17:19:19.048644989 +0000 UTC m=+4112.440313530" Nov 28 17:19:19 crc kubenswrapper[4954]: I1128 17:19:19.111881 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j68wt"] Nov 28 17:19:19 crc kubenswrapper[4954]: I1128 17:19:19.230943 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zzbvs"] Nov 28 17:19:19 crc kubenswrapper[4954]: I1128 17:19:19.231216 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zzbvs" podUID="a8c83942-d8fd-4b50-b873-df962c118505" containerName="registry-server" containerID="cri-o://2a36a6ef09a5b475572e05a42bc3aa4f263f00ab339c6fe803b683e9b0c2f1e9" gracePeriod=2 Nov 28 17:19:20 crc kubenswrapper[4954]: I1128 17:19:20.015454 4954 generic.go:334] "Generic (PLEG): container finished" podID="a8c83942-d8fd-4b50-b873-df962c118505" containerID="2a36a6ef09a5b475572e05a42bc3aa4f263f00ab339c6fe803b683e9b0c2f1e9" exitCode=0 Nov 28 17:19:20 crc kubenswrapper[4954]: I1128 17:19:20.015580 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zzbvs" event={"ID":"a8c83942-d8fd-4b50-b873-df962c118505","Type":"ContainerDied","Data":"2a36a6ef09a5b475572e05a42bc3aa4f263f00ab339c6fe803b683e9b0c2f1e9"} Nov 28 17:19:20 crc kubenswrapper[4954]: I1128 17:19:20.841255 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 17:19:20 crc kubenswrapper[4954]: I1128 17:19:20.853853 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8c83942-d8fd-4b50-b873-df962c118505-catalog-content\") pod \"a8c83942-d8fd-4b50-b873-df962c118505\" (UID: \"a8c83942-d8fd-4b50-b873-df962c118505\") " Nov 28 17:19:20 crc kubenswrapper[4954]: I1128 17:19:20.853923 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8c83942-d8fd-4b50-b873-df962c118505-utilities\") pod \"a8c83942-d8fd-4b50-b873-df962c118505\" (UID: \"a8c83942-d8fd-4b50-b873-df962c118505\") " Nov 28 17:19:20 crc kubenswrapper[4954]: I1128 17:19:20.853950 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jsf7b\" (UniqueName: \"kubernetes.io/projected/a8c83942-d8fd-4b50-b873-df962c118505-kube-api-access-jsf7b\") pod \"a8c83942-d8fd-4b50-b873-df962c118505\" (UID: \"a8c83942-d8fd-4b50-b873-df962c118505\") " Nov 28 17:19:20 crc kubenswrapper[4954]: I1128 17:19:20.854508 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8c83942-d8fd-4b50-b873-df962c118505-utilities" (OuterVolumeSpecName: "utilities") pod "a8c83942-d8fd-4b50-b873-df962c118505" (UID: "a8c83942-d8fd-4b50-b873-df962c118505"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:19:20 crc kubenswrapper[4954]: I1128 17:19:20.871908 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8c83942-d8fd-4b50-b873-df962c118505-kube-api-access-jsf7b" (OuterVolumeSpecName: "kube-api-access-jsf7b") pod "a8c83942-d8fd-4b50-b873-df962c118505" (UID: "a8c83942-d8fd-4b50-b873-df962c118505"). InnerVolumeSpecName "kube-api-access-jsf7b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:19:20 crc kubenswrapper[4954]: I1128 17:19:20.913037 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8c83942-d8fd-4b50-b873-df962c118505-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a8c83942-d8fd-4b50-b873-df962c118505" (UID: "a8c83942-d8fd-4b50-b873-df962c118505"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:19:20 crc kubenswrapper[4954]: I1128 17:19:20.954959 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8c83942-d8fd-4b50-b873-df962c118505-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:19:20 crc kubenswrapper[4954]: I1128 17:19:20.955004 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8c83942-d8fd-4b50-b873-df962c118505-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:19:20 crc kubenswrapper[4954]: I1128 17:19:20.955018 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jsf7b\" (UniqueName: \"kubernetes.io/projected/a8c83942-d8fd-4b50-b873-df962c118505-kube-api-access-jsf7b\") on node \"crc\" DevicePath \"\"" Nov 28 17:19:21 crc kubenswrapper[4954]: I1128 17:19:21.023086 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zzbvs" event={"ID":"a8c83942-d8fd-4b50-b873-df962c118505","Type":"ContainerDied","Data":"3a696fbc2e150be5b0d93fe3bdc932c45c1ccb711d0f87222ef94bdb73f00f81"} Nov 28 17:19:21 crc kubenswrapper[4954]: I1128 17:19:21.023153 4954 scope.go:117] "RemoveContainer" containerID="2a36a6ef09a5b475572e05a42bc3aa4f263f00ab339c6fe803b683e9b0c2f1e9" Nov 28 17:19:21 crc kubenswrapper[4954]: I1128 17:19:21.023275 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zzbvs" Nov 28 17:19:21 crc kubenswrapper[4954]: I1128 17:19:21.044741 4954 scope.go:117] "RemoveContainer" containerID="8499b7ad8a61fc1553e04ae7a045270af04df252722b02f43fb3d076cf9e45d9" Nov 28 17:19:21 crc kubenswrapper[4954]: I1128 17:19:21.058750 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zzbvs"] Nov 28 17:19:21 crc kubenswrapper[4954]: I1128 17:19:21.068180 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zzbvs"] Nov 28 17:19:21 crc kubenswrapper[4954]: I1128 17:19:21.095959 4954 scope.go:117] "RemoveContainer" containerID="fb862f1558d8de081470ac882ee3e8d62f693084d625134db0d02eae91158ca0" Nov 28 17:19:21 crc kubenswrapper[4954]: I1128 17:19:21.867223 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8c83942-d8fd-4b50-b873-df962c118505" path="/var/lib/kubelet/pods/a8c83942-d8fd-4b50-b873-df962c118505/volumes" Nov 28 17:19:31 crc kubenswrapper[4954]: I1128 17:19:31.685987 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gckgv"] Nov 28 17:19:31 crc kubenswrapper[4954]: E1128 17:19:31.687033 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c83942-d8fd-4b50-b873-df962c118505" containerName="extract-utilities" Nov 28 17:19:31 crc kubenswrapper[4954]: I1128 17:19:31.687050 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c83942-d8fd-4b50-b873-df962c118505" containerName="extract-utilities" Nov 28 17:19:31 crc kubenswrapper[4954]: E1128 17:19:31.687071 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c83942-d8fd-4b50-b873-df962c118505" containerName="registry-server" Nov 28 17:19:31 crc kubenswrapper[4954]: I1128 17:19:31.687078 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c83942-d8fd-4b50-b873-df962c118505" containerName="registry-server" Nov 28 17:19:31 crc kubenswrapper[4954]: E1128 17:19:31.687095 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c83942-d8fd-4b50-b873-df962c118505" containerName="extract-content" Nov 28 17:19:31 crc kubenswrapper[4954]: I1128 17:19:31.687102 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c83942-d8fd-4b50-b873-df962c118505" containerName="extract-content" Nov 28 17:19:31 crc kubenswrapper[4954]: I1128 17:19:31.687273 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8c83942-d8fd-4b50-b873-df962c118505" containerName="registry-server" Nov 28 17:19:31 crc kubenswrapper[4954]: I1128 17:19:31.688616 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:31 crc kubenswrapper[4954]: I1128 17:19:31.695062 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gckgv"] Nov 28 17:19:31 crc kubenswrapper[4954]: I1128 17:19:31.762778 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-catalog-content\") pod \"redhat-operators-gckgv\" (UID: \"a039d429-f0e5-42ac-b8da-cb7c167a8bfb\") " pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:31 crc kubenswrapper[4954]: I1128 17:19:31.762952 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-utilities\") pod \"redhat-operators-gckgv\" (UID: \"a039d429-f0e5-42ac-b8da-cb7c167a8bfb\") " pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:31 crc kubenswrapper[4954]: I1128 17:19:31.762985 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klcsn\" (UniqueName: \"kubernetes.io/projected/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-kube-api-access-klcsn\") pod \"redhat-operators-gckgv\" (UID: \"a039d429-f0e5-42ac-b8da-cb7c167a8bfb\") " pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:31 crc kubenswrapper[4954]: I1128 17:19:31.864763 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-utilities\") pod \"redhat-operators-gckgv\" (UID: \"a039d429-f0e5-42ac-b8da-cb7c167a8bfb\") " pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:31 crc kubenswrapper[4954]: I1128 17:19:31.864806 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klcsn\" (UniqueName: \"kubernetes.io/projected/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-kube-api-access-klcsn\") pod \"redhat-operators-gckgv\" (UID: \"a039d429-f0e5-42ac-b8da-cb7c167a8bfb\") " pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:31 crc kubenswrapper[4954]: I1128 17:19:31.864860 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-catalog-content\") pod \"redhat-operators-gckgv\" (UID: \"a039d429-f0e5-42ac-b8da-cb7c167a8bfb\") " pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:31 crc kubenswrapper[4954]: I1128 17:19:31.865343 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-utilities\") pod \"redhat-operators-gckgv\" (UID: \"a039d429-f0e5-42ac-b8da-cb7c167a8bfb\") " pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:31 crc kubenswrapper[4954]: I1128 17:19:31.865432 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-catalog-content\") pod \"redhat-operators-gckgv\" (UID: \"a039d429-f0e5-42ac-b8da-cb7c167a8bfb\") " pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:32 crc kubenswrapper[4954]: I1128 17:19:32.074195 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-klcsn\" (UniqueName: \"kubernetes.io/projected/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-kube-api-access-klcsn\") pod \"redhat-operators-gckgv\" (UID: \"a039d429-f0e5-42ac-b8da-cb7c167a8bfb\") " pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:32 crc kubenswrapper[4954]: I1128 17:19:32.320214 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:32 crc kubenswrapper[4954]: I1128 17:19:32.802657 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gckgv"] Nov 28 17:19:33 crc kubenswrapper[4954]: I1128 17:19:33.170325 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gckgv" event={"ID":"a039d429-f0e5-42ac-b8da-cb7c167a8bfb","Type":"ContainerStarted","Data":"0f31a1afde54f74e8fb685822e5e2fd0d6496dd4d3b1bf19f6493a4c11ea5868"} Nov 28 17:19:35 crc kubenswrapper[4954]: I1128 17:19:35.187168 4954 generic.go:334] "Generic (PLEG): container finished" podID="a039d429-f0e5-42ac-b8da-cb7c167a8bfb" containerID="60feff0d9350af870385e52e44c06f9d3ebfd4c7b80bbb74fefd3df194dc4e5c" exitCode=0 Nov 28 17:19:35 crc kubenswrapper[4954]: I1128 17:19:35.187244 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gckgv" event={"ID":"a039d429-f0e5-42ac-b8da-cb7c167a8bfb","Type":"ContainerDied","Data":"60feff0d9350af870385e52e44c06f9d3ebfd4c7b80bbb74fefd3df194dc4e5c"} Nov 28 17:19:37 crc kubenswrapper[4954]: I1128 17:19:37.211297 4954 generic.go:334] "Generic (PLEG): container finished" podID="a039d429-f0e5-42ac-b8da-cb7c167a8bfb" containerID="77aaebe642bb95681b7c48ed8cb568e20e7503f77a0a194779d3fbef12ae7e44" exitCode=0 Nov 28 17:19:37 crc kubenswrapper[4954]: I1128 17:19:37.211383 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gckgv" event={"ID":"a039d429-f0e5-42ac-b8da-cb7c167a8bfb","Type":"ContainerDied","Data":"77aaebe642bb95681b7c48ed8cb568e20e7503f77a0a194779d3fbef12ae7e44"} Nov 28 17:19:38 crc kubenswrapper[4954]: I1128 17:19:38.223162 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gckgv" event={"ID":"a039d429-f0e5-42ac-b8da-cb7c167a8bfb","Type":"ContainerStarted","Data":"75356e1431174d77dba9b888d677156c3a61d656eabfa7a036131cac296b3344"} Nov 28 17:19:38 crc kubenswrapper[4954]: I1128 17:19:38.243375 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gckgv" podStartSLOduration=4.800256427 podStartE2EDuration="7.243353476s" podCreationTimestamp="2025-11-28 17:19:31 +0000 UTC" firstStartedPulling="2025-11-28 17:19:35.18869522 +0000 UTC m=+4128.580363761" lastFinishedPulling="2025-11-28 17:19:37.631792269 +0000 UTC m=+4131.023460810" observedRunningTime="2025-11-28 17:19:38.239887948 +0000 UTC m=+4131.631556499" watchObservedRunningTime="2025-11-28 17:19:38.243353476 +0000 UTC m=+4131.635022017" Nov 28 17:19:42 crc kubenswrapper[4954]: I1128 17:19:42.321013 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:42 crc kubenswrapper[4954]: I1128 17:19:42.321648 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:43 crc kubenswrapper[4954]: I1128 17:19:43.362348 4954 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-marketplace/redhat-operators-gckgv" podUID="a039d429-f0e5-42ac-b8da-cb7c167a8bfb" containerName="registry-server" probeResult="failure" output=< Nov 28 17:19:43 crc kubenswrapper[4954]: timeout: failed to connect service ":50051" within 1s Nov 28 17:19:43 crc kubenswrapper[4954]: > Nov 28 17:19:52 crc kubenswrapper[4954]: I1128 17:19:52.364913 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:52 crc kubenswrapper[4954]: I1128 17:19:52.408634 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:52 crc kubenswrapper[4954]: I1128 17:19:52.607942 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gckgv"] Nov 28 17:19:54 crc kubenswrapper[4954]: I1128 17:19:54.333515 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gckgv" podUID="a039d429-f0e5-42ac-b8da-cb7c167a8bfb" containerName="registry-server" containerID="cri-o://75356e1431174d77dba9b888d677156c3a61d656eabfa7a036131cac296b3344" gracePeriod=2 Nov 28 17:19:55 crc kubenswrapper[4954]: I1128 17:19:55.345759 4954 generic.go:334] "Generic (PLEG): container finished" podID="a039d429-f0e5-42ac-b8da-cb7c167a8bfb" containerID="75356e1431174d77dba9b888d677156c3a61d656eabfa7a036131cac296b3344" exitCode=0 Nov 28 17:19:55 crc kubenswrapper[4954]: I1128 17:19:55.345848 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gckgv" event={"ID":"a039d429-f0e5-42ac-b8da-cb7c167a8bfb","Type":"ContainerDied","Data":"75356e1431174d77dba9b888d677156c3a61d656eabfa7a036131cac296b3344"} Nov 28 17:19:55 crc kubenswrapper[4954]: I1128 17:19:55.819471 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:55 crc kubenswrapper[4954]: I1128 17:19:55.940160 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klcsn\" (UniqueName: \"kubernetes.io/projected/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-kube-api-access-klcsn\") pod \"a039d429-f0e5-42ac-b8da-cb7c167a8bfb\" (UID: \"a039d429-f0e5-42ac-b8da-cb7c167a8bfb\") " Nov 28 17:19:55 crc kubenswrapper[4954]: I1128 17:19:55.940310 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-utilities\") pod \"a039d429-f0e5-42ac-b8da-cb7c167a8bfb\" (UID: \"a039d429-f0e5-42ac-b8da-cb7c167a8bfb\") " Nov 28 17:19:55 crc kubenswrapper[4954]: I1128 17:19:55.940376 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-catalog-content\") pod \"a039d429-f0e5-42ac-b8da-cb7c167a8bfb\" (UID: \"a039d429-f0e5-42ac-b8da-cb7c167a8bfb\") " Nov 28 17:19:55 crc kubenswrapper[4954]: I1128 17:19:55.941444 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-utilities" (OuterVolumeSpecName: "utilities") pod "a039d429-f0e5-42ac-b8da-cb7c167a8bfb" (UID: "a039d429-f0e5-42ac-b8da-cb7c167a8bfb"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:19:55 crc kubenswrapper[4954]: I1128 17:19:55.941974 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:19:55 crc kubenswrapper[4954]: I1128 17:19:55.948859 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-kube-api-access-klcsn" (OuterVolumeSpecName: "kube-api-access-klcsn") pod "a039d429-f0e5-42ac-b8da-cb7c167a8bfb" (UID: "a039d429-f0e5-42ac-b8da-cb7c167a8bfb"). InnerVolumeSpecName "kube-api-access-klcsn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:19:56 crc kubenswrapper[4954]: I1128 17:19:56.043564 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-klcsn\" (UniqueName: \"kubernetes.io/projected/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-kube-api-access-klcsn\") on node \"crc\" DevicePath \"\"" Nov 28 17:19:56 crc kubenswrapper[4954]: I1128 17:19:56.069329 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a039d429-f0e5-42ac-b8da-cb7c167a8bfb" (UID: "a039d429-f0e5-42ac-b8da-cb7c167a8bfb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:19:56 crc kubenswrapper[4954]: I1128 17:19:56.144853 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a039d429-f0e5-42ac-b8da-cb7c167a8bfb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:19:56 crc kubenswrapper[4954]: I1128 17:19:56.358291 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gckgv" event={"ID":"a039d429-f0e5-42ac-b8da-cb7c167a8bfb","Type":"ContainerDied","Data":"0f31a1afde54f74e8fb685822e5e2fd0d6496dd4d3b1bf19f6493a4c11ea5868"} Nov 28 17:19:56 crc kubenswrapper[4954]: I1128 17:19:56.358371 4954 scope.go:117] "RemoveContainer" containerID="75356e1431174d77dba9b888d677156c3a61d656eabfa7a036131cac296b3344" Nov 28 17:19:56 crc kubenswrapper[4954]: I1128 17:19:56.358384 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gckgv" Nov 28 17:19:56 crc kubenswrapper[4954]: I1128 17:19:56.385489 4954 scope.go:117] "RemoveContainer" containerID="77aaebe642bb95681b7c48ed8cb568e20e7503f77a0a194779d3fbef12ae7e44" Nov 28 17:19:56 crc kubenswrapper[4954]: I1128 17:19:56.399376 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gckgv"] Nov 28 17:19:56 crc kubenswrapper[4954]: I1128 17:19:56.406205 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gckgv"] Nov 28 17:19:56 crc kubenswrapper[4954]: I1128 17:19:56.416751 4954 scope.go:117] "RemoveContainer" containerID="60feff0d9350af870385e52e44c06f9d3ebfd4c7b80bbb74fefd3df194dc4e5c" Nov 28 17:19:57 crc kubenswrapper[4954]: I1128 17:19:57.866008 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a039d429-f0e5-42ac-b8da-cb7c167a8bfb" path="/var/lib/kubelet/pods/a039d429-f0e5-42ac-b8da-cb7c167a8bfb/volumes" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.221325 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-v67zs"] Nov 28 17:20:26 crc kubenswrapper[4954]: E1128 17:20:26.222814 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a039d429-f0e5-42ac-b8da-cb7c167a8bfb" containerName="extract-content" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.222846 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="a039d429-f0e5-42ac-b8da-cb7c167a8bfb" containerName="extract-content" Nov 28 17:20:26 crc kubenswrapper[4954]: E1128 17:20:26.222909 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a039d429-f0e5-42ac-b8da-cb7c167a8bfb" containerName="registry-server" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.222932 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="a039d429-f0e5-42ac-b8da-cb7c167a8bfb" containerName="registry-server" Nov 28 17:20:26 crc kubenswrapper[4954]: E1128 17:20:26.223114 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a039d429-f0e5-42ac-b8da-cb7c167a8bfb" containerName="extract-utilities" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.223125 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="a039d429-f0e5-42ac-b8da-cb7c167a8bfb" containerName="extract-utilities" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.224857 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="a039d429-f0e5-42ac-b8da-cb7c167a8bfb" containerName="registry-server" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.227863 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.240912 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-v67zs"] Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.358397 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w75td\" (UniqueName: \"kubernetes.io/projected/de5c3dfd-2622-4991-82a5-e49bf86c89e1-kube-api-access-w75td\") pod \"community-operators-v67zs\" (UID: \"de5c3dfd-2622-4991-82a5-e49bf86c89e1\") " pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.358448 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de5c3dfd-2622-4991-82a5-e49bf86c89e1-utilities\") pod \"community-operators-v67zs\" (UID: \"de5c3dfd-2622-4991-82a5-e49bf86c89e1\") " pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.358671 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de5c3dfd-2622-4991-82a5-e49bf86c89e1-catalog-content\") pod \"community-operators-v67zs\" (UID: \"de5c3dfd-2622-4991-82a5-e49bf86c89e1\") " pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.460011 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w75td\" (UniqueName: \"kubernetes.io/projected/de5c3dfd-2622-4991-82a5-e49bf86c89e1-kube-api-access-w75td\") pod \"community-operators-v67zs\" (UID: \"de5c3dfd-2622-4991-82a5-e49bf86c89e1\") " pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.460107 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de5c3dfd-2622-4991-82a5-e49bf86c89e1-utilities\") pod \"community-operators-v67zs\" (UID: \"de5c3dfd-2622-4991-82a5-e49bf86c89e1\") " pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.460166 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de5c3dfd-2622-4991-82a5-e49bf86c89e1-catalog-content\") pod \"community-operators-v67zs\" (UID: \"de5c3dfd-2622-4991-82a5-e49bf86c89e1\") " pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.460696 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de5c3dfd-2622-4991-82a5-e49bf86c89e1-catalog-content\") pod \"community-operators-v67zs\" (UID: \"de5c3dfd-2622-4991-82a5-e49bf86c89e1\") " pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.460792 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de5c3dfd-2622-4991-82a5-e49bf86c89e1-utilities\") pod \"community-operators-v67zs\" (UID: \"de5c3dfd-2622-4991-82a5-e49bf86c89e1\") " pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.482732 4954 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-w75td\" (UniqueName: \"kubernetes.io/projected/de5c3dfd-2622-4991-82a5-e49bf86c89e1-kube-api-access-w75td\") pod \"community-operators-v67zs\" (UID: \"de5c3dfd-2622-4991-82a5-e49bf86c89e1\") " pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:26 crc kubenswrapper[4954]: I1128 17:20:26.568939 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:27 crc kubenswrapper[4954]: I1128 17:20:27.070763 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-v67zs"] Nov 28 17:20:27 crc kubenswrapper[4954]: W1128 17:20:27.075698 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde5c3dfd_2622_4991_82a5_e49bf86c89e1.slice/crio-4cfaebaea622a4d02b9547c4d71ded350abbc51ddd4574f9c940bfad7d49ea66 WatchSource:0}: Error finding container 4cfaebaea622a4d02b9547c4d71ded350abbc51ddd4574f9c940bfad7d49ea66: Status 404 returned error can't find the container with id 4cfaebaea622a4d02b9547c4d71ded350abbc51ddd4574f9c940bfad7d49ea66 Nov 28 17:20:27 crc kubenswrapper[4954]: I1128 17:20:27.615131 4954 generic.go:334] "Generic (PLEG): container finished" podID="de5c3dfd-2622-4991-82a5-e49bf86c89e1" containerID="a3b4513da4562f2772a8e5bdc9036948c806084c2331219d9979114d9915eadf" exitCode=0 Nov 28 17:20:27 crc kubenswrapper[4954]: I1128 17:20:27.615186 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v67zs" event={"ID":"de5c3dfd-2622-4991-82a5-e49bf86c89e1","Type":"ContainerDied","Data":"a3b4513da4562f2772a8e5bdc9036948c806084c2331219d9979114d9915eadf"} Nov 28 17:20:27 crc kubenswrapper[4954]: I1128 17:20:27.615216 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v67zs" event={"ID":"de5c3dfd-2622-4991-82a5-e49bf86c89e1","Type":"ContainerStarted","Data":"4cfaebaea622a4d02b9547c4d71ded350abbc51ddd4574f9c940bfad7d49ea66"} Nov 28 17:20:29 crc kubenswrapper[4954]: I1128 17:20:29.632036 4954 generic.go:334] "Generic (PLEG): container finished" podID="de5c3dfd-2622-4991-82a5-e49bf86c89e1" containerID="fa0bb538d7b0634312b985cce371824cceb1a0fbbc5c0bc4e5d3877bb202a769" exitCode=0 Nov 28 17:20:29 crc kubenswrapper[4954]: I1128 17:20:29.632121 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v67zs" event={"ID":"de5c3dfd-2622-4991-82a5-e49bf86c89e1","Type":"ContainerDied","Data":"fa0bb538d7b0634312b985cce371824cceb1a0fbbc5c0bc4e5d3877bb202a769"} Nov 28 17:20:31 crc kubenswrapper[4954]: I1128 17:20:31.647950 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v67zs" event={"ID":"de5c3dfd-2622-4991-82a5-e49bf86c89e1","Type":"ContainerStarted","Data":"4760b17b185a574b3ff8953767fb5d9ffa67a5e652a582c8d73f39c74a78bbee"} Nov 28 17:20:31 crc kubenswrapper[4954]: I1128 17:20:31.669310 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-v67zs" podStartSLOduration=2.832957802 podStartE2EDuration="5.669294264s" podCreationTimestamp="2025-11-28 17:20:26 +0000 UTC" firstStartedPulling="2025-11-28 17:20:27.61754427 +0000 UTC m=+4181.009212801" lastFinishedPulling="2025-11-28 17:20:30.453880722 +0000 UTC m=+4183.845549263" observedRunningTime="2025-11-28 17:20:31.663913584 +0000 UTC 
m=+4185.055582135" watchObservedRunningTime="2025-11-28 17:20:31.669294264 +0000 UTC m=+4185.060962805" Nov 28 17:20:32 crc kubenswrapper[4954]: I1128 17:20:32.480592 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:20:32 crc kubenswrapper[4954]: I1128 17:20:32.480969 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:20:36 crc kubenswrapper[4954]: I1128 17:20:36.569756 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:36 crc kubenswrapper[4954]: I1128 17:20:36.570247 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:36 crc kubenswrapper[4954]: I1128 17:20:36.617233 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:36 crc kubenswrapper[4954]: I1128 17:20:36.737915 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:40 crc kubenswrapper[4954]: I1128 17:20:40.796046 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-v67zs"] Nov 28 17:20:40 crc kubenswrapper[4954]: I1128 17:20:40.796748 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-v67zs" podUID="de5c3dfd-2622-4991-82a5-e49bf86c89e1" containerName="registry-server" containerID="cri-o://4760b17b185a574b3ff8953767fb5d9ffa67a5e652a582c8d73f39c74a78bbee" gracePeriod=2 Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.666110 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.728839 4954 generic.go:334] "Generic (PLEG): container finished" podID="de5c3dfd-2622-4991-82a5-e49bf86c89e1" containerID="4760b17b185a574b3ff8953767fb5d9ffa67a5e652a582c8d73f39c74a78bbee" exitCode=0 Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.728883 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-v67zs" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.728933 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v67zs" event={"ID":"de5c3dfd-2622-4991-82a5-e49bf86c89e1","Type":"ContainerDied","Data":"4760b17b185a574b3ff8953767fb5d9ffa67a5e652a582c8d73f39c74a78bbee"} Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.728958 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-v67zs" event={"ID":"de5c3dfd-2622-4991-82a5-e49bf86c89e1","Type":"ContainerDied","Data":"4cfaebaea622a4d02b9547c4d71ded350abbc51ddd4574f9c940bfad7d49ea66"} Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.728974 4954 scope.go:117] "RemoveContainer" containerID="4760b17b185a574b3ff8953767fb5d9ffa67a5e652a582c8d73f39c74a78bbee" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.745597 4954 scope.go:117] "RemoveContainer" containerID="fa0bb538d7b0634312b985cce371824cceb1a0fbbc5c0bc4e5d3877bb202a769" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.760557 4954 scope.go:117] "RemoveContainer" containerID="a3b4513da4562f2772a8e5bdc9036948c806084c2331219d9979114d9915eadf" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.768705 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de5c3dfd-2622-4991-82a5-e49bf86c89e1-utilities\") pod \"de5c3dfd-2622-4991-82a5-e49bf86c89e1\" (UID: \"de5c3dfd-2622-4991-82a5-e49bf86c89e1\") " Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.768772 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de5c3dfd-2622-4991-82a5-e49bf86c89e1-catalog-content\") pod \"de5c3dfd-2622-4991-82a5-e49bf86c89e1\" (UID: \"de5c3dfd-2622-4991-82a5-e49bf86c89e1\") " Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.768939 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w75td\" (UniqueName: \"kubernetes.io/projected/de5c3dfd-2622-4991-82a5-e49bf86c89e1-kube-api-access-w75td\") pod \"de5c3dfd-2622-4991-82a5-e49bf86c89e1\" (UID: \"de5c3dfd-2622-4991-82a5-e49bf86c89e1\") " Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.769804 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de5c3dfd-2622-4991-82a5-e49bf86c89e1-utilities" (OuterVolumeSpecName: "utilities") pod "de5c3dfd-2622-4991-82a5-e49bf86c89e1" (UID: "de5c3dfd-2622-4991-82a5-e49bf86c89e1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.774816 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de5c3dfd-2622-4991-82a5-e49bf86c89e1-kube-api-access-w75td" (OuterVolumeSpecName: "kube-api-access-w75td") pod "de5c3dfd-2622-4991-82a5-e49bf86c89e1" (UID: "de5c3dfd-2622-4991-82a5-e49bf86c89e1"). InnerVolumeSpecName "kube-api-access-w75td". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.786815 4954 scope.go:117] "RemoveContainer" containerID="4760b17b185a574b3ff8953767fb5d9ffa67a5e652a582c8d73f39c74a78bbee" Nov 28 17:20:41 crc kubenswrapper[4954]: E1128 17:20:41.788195 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4760b17b185a574b3ff8953767fb5d9ffa67a5e652a582c8d73f39c74a78bbee\": container with ID starting with 4760b17b185a574b3ff8953767fb5d9ffa67a5e652a582c8d73f39c74a78bbee not found: ID does not exist" containerID="4760b17b185a574b3ff8953767fb5d9ffa67a5e652a582c8d73f39c74a78bbee" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.788274 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4760b17b185a574b3ff8953767fb5d9ffa67a5e652a582c8d73f39c74a78bbee"} err="failed to get container status \"4760b17b185a574b3ff8953767fb5d9ffa67a5e652a582c8d73f39c74a78bbee\": rpc error: code = NotFound desc = could not find container \"4760b17b185a574b3ff8953767fb5d9ffa67a5e652a582c8d73f39c74a78bbee\": container with ID starting with 4760b17b185a574b3ff8953767fb5d9ffa67a5e652a582c8d73f39c74a78bbee not found: ID does not exist" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.788310 4954 scope.go:117] "RemoveContainer" containerID="fa0bb538d7b0634312b985cce371824cceb1a0fbbc5c0bc4e5d3877bb202a769" Nov 28 17:20:41 crc kubenswrapper[4954]: E1128 17:20:41.789279 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa0bb538d7b0634312b985cce371824cceb1a0fbbc5c0bc4e5d3877bb202a769\": container with ID starting with fa0bb538d7b0634312b985cce371824cceb1a0fbbc5c0bc4e5d3877bb202a769 not found: ID does not exist" containerID="fa0bb538d7b0634312b985cce371824cceb1a0fbbc5c0bc4e5d3877bb202a769" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.789319 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa0bb538d7b0634312b985cce371824cceb1a0fbbc5c0bc4e5d3877bb202a769"} err="failed to get container status \"fa0bb538d7b0634312b985cce371824cceb1a0fbbc5c0bc4e5d3877bb202a769\": rpc error: code = NotFound desc = could not find container \"fa0bb538d7b0634312b985cce371824cceb1a0fbbc5c0bc4e5d3877bb202a769\": container with ID starting with fa0bb538d7b0634312b985cce371824cceb1a0fbbc5c0bc4e5d3877bb202a769 not found: ID does not exist" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.789347 4954 scope.go:117] "RemoveContainer" containerID="a3b4513da4562f2772a8e5bdc9036948c806084c2331219d9979114d9915eadf" Nov 28 17:20:41 crc kubenswrapper[4954]: E1128 17:20:41.789917 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3b4513da4562f2772a8e5bdc9036948c806084c2331219d9979114d9915eadf\": container with ID starting with a3b4513da4562f2772a8e5bdc9036948c806084c2331219d9979114d9915eadf not found: ID does not exist" containerID="a3b4513da4562f2772a8e5bdc9036948c806084c2331219d9979114d9915eadf" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.789979 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3b4513da4562f2772a8e5bdc9036948c806084c2331219d9979114d9915eadf"} err="failed to get container status \"a3b4513da4562f2772a8e5bdc9036948c806084c2331219d9979114d9915eadf\": rpc error: code = NotFound desc = could not 
find container \"a3b4513da4562f2772a8e5bdc9036948c806084c2331219d9979114d9915eadf\": container with ID starting with a3b4513da4562f2772a8e5bdc9036948c806084c2331219d9979114d9915eadf not found: ID does not exist" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.848399 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de5c3dfd-2622-4991-82a5-e49bf86c89e1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "de5c3dfd-2622-4991-82a5-e49bf86c89e1" (UID: "de5c3dfd-2622-4991-82a5-e49bf86c89e1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.870403 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de5c3dfd-2622-4991-82a5-e49bf86c89e1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.870441 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de5c3dfd-2622-4991-82a5-e49bf86c89e1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:20:41 crc kubenswrapper[4954]: I1128 17:20:41.870455 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w75td\" (UniqueName: \"kubernetes.io/projected/de5c3dfd-2622-4991-82a5-e49bf86c89e1-kube-api-access-w75td\") on node \"crc\" DevicePath \"\"" Nov 28 17:20:42 crc kubenswrapper[4954]: I1128 17:20:42.051655 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-v67zs"] Nov 28 17:20:42 crc kubenswrapper[4954]: I1128 17:20:42.058036 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-v67zs"] Nov 28 17:20:43 crc kubenswrapper[4954]: I1128 17:20:43.865055 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de5c3dfd-2622-4991-82a5-e49bf86c89e1" path="/var/lib/kubelet/pods/de5c3dfd-2622-4991-82a5-e49bf86c89e1/volumes" Nov 28 17:21:02 crc kubenswrapper[4954]: I1128 17:21:02.480584 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:21:02 crc kubenswrapper[4954]: I1128 17:21:02.481180 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:21:32 crc kubenswrapper[4954]: I1128 17:21:32.481158 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:21:32 crc kubenswrapper[4954]: I1128 17:21:32.481818 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection 
refused" Nov 28 17:21:32 crc kubenswrapper[4954]: I1128 17:21:32.481865 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 17:21:32 crc kubenswrapper[4954]: I1128 17:21:32.482500 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"616577e95c358da6750ba9f8047fb77e533eba34e29e838e43bd81b354deaca8"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:21:32 crc kubenswrapper[4954]: I1128 17:21:32.482576 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://616577e95c358da6750ba9f8047fb77e533eba34e29e838e43bd81b354deaca8" gracePeriod=600 Nov 28 17:21:33 crc kubenswrapper[4954]: I1128 17:21:33.123814 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="616577e95c358da6750ba9f8047fb77e533eba34e29e838e43bd81b354deaca8" exitCode=0 Nov 28 17:21:33 crc kubenswrapper[4954]: I1128 17:21:33.123885 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"616577e95c358da6750ba9f8047fb77e533eba34e29e838e43bd81b354deaca8"} Nov 28 17:21:33 crc kubenswrapper[4954]: I1128 17:21:33.124688 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1"} Nov 28 17:21:33 crc kubenswrapper[4954]: I1128 17:21:33.124726 4954 scope.go:117] "RemoveContainer" containerID="dc18da245ffebecd31252277da7c5a00e6cc9b2ad0a386dd54b3e01c5fd79645" Nov 28 17:23:32 crc kubenswrapper[4954]: I1128 17:23:32.480576 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:23:32 crc kubenswrapper[4954]: I1128 17:23:32.481225 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:24:02 crc kubenswrapper[4954]: I1128 17:24:02.481054 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:24:02 crc kubenswrapper[4954]: I1128 17:24:02.481472 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:24:32 crc kubenswrapper[4954]: I1128 17:24:32.481236 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:24:32 crc kubenswrapper[4954]: I1128 17:24:32.481850 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:24:32 crc kubenswrapper[4954]: I1128 17:24:32.481900 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 17:24:32 crc kubenswrapper[4954]: I1128 17:24:32.482547 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:24:32 crc kubenswrapper[4954]: I1128 17:24:32.482600 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" gracePeriod=600 Nov 28 17:24:32 crc kubenswrapper[4954]: E1128 17:24:32.613281 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:24:33 crc kubenswrapper[4954]: I1128 17:24:33.381200 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" exitCode=0 Nov 28 17:24:33 crc kubenswrapper[4954]: I1128 17:24:33.381249 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1"} Nov 28 17:24:33 crc kubenswrapper[4954]: I1128 17:24:33.381285 4954 scope.go:117] "RemoveContainer" containerID="616577e95c358da6750ba9f8047fb77e533eba34e29e838e43bd81b354deaca8" Nov 28 17:24:33 crc kubenswrapper[4954]: I1128 17:24:33.381875 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:24:33 crc kubenswrapper[4954]: E1128 17:24:33.382161 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:24:47 crc kubenswrapper[4954]: I1128 17:24:47.862978 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:24:47 crc kubenswrapper[4954]: E1128 17:24:47.863505 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:25:00 crc kubenswrapper[4954]: I1128 17:25:00.856663 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:25:00 crc kubenswrapper[4954]: E1128 17:25:00.857349 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:25:11 crc kubenswrapper[4954]: I1128 17:25:11.857154 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:25:11 crc kubenswrapper[4954]: E1128 17:25:11.858000 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:25:25 crc kubenswrapper[4954]: I1128 17:25:25.856435 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:25:25 crc kubenswrapper[4954]: E1128 17:25:25.857274 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:25:37 crc kubenswrapper[4954]: I1128 17:25:37.861846 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:25:37 crc kubenswrapper[4954]: E1128 17:25:37.862585 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:25:50 crc kubenswrapper[4954]: I1128 17:25:50.856985 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:25:50 crc kubenswrapper[4954]: E1128 17:25:50.857821 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:26:02 crc kubenswrapper[4954]: I1128 17:26:02.856225 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:26:02 crc kubenswrapper[4954]: E1128 17:26:02.856927 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:26:14 crc kubenswrapper[4954]: I1128 17:26:14.856827 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:26:14 crc kubenswrapper[4954]: E1128 17:26:14.857734 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:26:25 crc kubenswrapper[4954]: I1128 17:26:25.856705 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:26:25 crc kubenswrapper[4954]: E1128 17:26:25.857381 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:26:40 crc kubenswrapper[4954]: I1128 17:26:40.856665 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:26:40 crc kubenswrapper[4954]: E1128 17:26:40.857453 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:26:51 crc kubenswrapper[4954]: I1128 17:26:51.856398 4954 
scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:26:51 crc kubenswrapper[4954]: E1128 17:26:51.857241 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:27:04 crc kubenswrapper[4954]: I1128 17:27:04.856950 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:27:04 crc kubenswrapper[4954]: E1128 17:27:04.857815 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:27:15 crc kubenswrapper[4954]: I1128 17:27:15.856000 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:27:15 crc kubenswrapper[4954]: E1128 17:27:15.856734 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:27:27 crc kubenswrapper[4954]: I1128 17:27:27.862416 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:27:27 crc kubenswrapper[4954]: E1128 17:27:27.864906 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:27:41 crc kubenswrapper[4954]: I1128 17:27:41.856768 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:27:41 crc kubenswrapper[4954]: E1128 17:27:41.857571 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:27:54 crc kubenswrapper[4954]: I1128 17:27:54.856619 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:27:54 crc kubenswrapper[4954]: E1128 17:27:54.857235 4954 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:28:07 crc kubenswrapper[4954]: I1128 17:28:07.863775 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:28:07 crc kubenswrapper[4954]: E1128 17:28:07.865153 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:28:19 crc kubenswrapper[4954]: I1128 17:28:19.855967 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:28:19 crc kubenswrapper[4954]: E1128 17:28:19.856948 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:28:31 crc kubenswrapper[4954]: I1128 17:28:31.954083 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qv4wj"] Nov 28 17:28:31 crc kubenswrapper[4954]: E1128 17:28:31.955005 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de5c3dfd-2622-4991-82a5-e49bf86c89e1" containerName="extract-utilities" Nov 28 17:28:31 crc kubenswrapper[4954]: I1128 17:28:31.955024 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="de5c3dfd-2622-4991-82a5-e49bf86c89e1" containerName="extract-utilities" Nov 28 17:28:31 crc kubenswrapper[4954]: E1128 17:28:31.955046 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de5c3dfd-2622-4991-82a5-e49bf86c89e1" containerName="extract-content" Nov 28 17:28:31 crc kubenswrapper[4954]: I1128 17:28:31.955056 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="de5c3dfd-2622-4991-82a5-e49bf86c89e1" containerName="extract-content" Nov 28 17:28:31 crc kubenswrapper[4954]: E1128 17:28:31.955072 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de5c3dfd-2622-4991-82a5-e49bf86c89e1" containerName="registry-server" Nov 28 17:28:31 crc kubenswrapper[4954]: I1128 17:28:31.955080 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="de5c3dfd-2622-4991-82a5-e49bf86c89e1" containerName="registry-server" Nov 28 17:28:31 crc kubenswrapper[4954]: I1128 17:28:31.955255 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="de5c3dfd-2622-4991-82a5-e49bf86c89e1" containerName="registry-server" Nov 28 17:28:31 crc kubenswrapper[4954]: I1128 17:28:31.956575 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:31 crc kubenswrapper[4954]: I1128 17:28:31.968009 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qv4wj"] Nov 28 17:28:32 crc kubenswrapper[4954]: I1128 17:28:32.144371 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5c147b1-21ed-4b14-b26f-c9969f0abc89-catalog-content\") pod \"redhat-marketplace-qv4wj\" (UID: \"f5c147b1-21ed-4b14-b26f-c9969f0abc89\") " pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:32 crc kubenswrapper[4954]: I1128 17:28:32.144430 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5c147b1-21ed-4b14-b26f-c9969f0abc89-utilities\") pod \"redhat-marketplace-qv4wj\" (UID: \"f5c147b1-21ed-4b14-b26f-c9969f0abc89\") " pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:32 crc kubenswrapper[4954]: I1128 17:28:32.144475 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9c6b2\" (UniqueName: \"kubernetes.io/projected/f5c147b1-21ed-4b14-b26f-c9969f0abc89-kube-api-access-9c6b2\") pod \"redhat-marketplace-qv4wj\" (UID: \"f5c147b1-21ed-4b14-b26f-c9969f0abc89\") " pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:32 crc kubenswrapper[4954]: I1128 17:28:32.246137 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c6b2\" (UniqueName: \"kubernetes.io/projected/f5c147b1-21ed-4b14-b26f-c9969f0abc89-kube-api-access-9c6b2\") pod \"redhat-marketplace-qv4wj\" (UID: \"f5c147b1-21ed-4b14-b26f-c9969f0abc89\") " pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:32 crc kubenswrapper[4954]: I1128 17:28:32.246306 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5c147b1-21ed-4b14-b26f-c9969f0abc89-catalog-content\") pod \"redhat-marketplace-qv4wj\" (UID: \"f5c147b1-21ed-4b14-b26f-c9969f0abc89\") " pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:32 crc kubenswrapper[4954]: I1128 17:28:32.246346 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5c147b1-21ed-4b14-b26f-c9969f0abc89-utilities\") pod \"redhat-marketplace-qv4wj\" (UID: \"f5c147b1-21ed-4b14-b26f-c9969f0abc89\") " pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:32 crc kubenswrapper[4954]: I1128 17:28:32.246910 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5c147b1-21ed-4b14-b26f-c9969f0abc89-catalog-content\") pod \"redhat-marketplace-qv4wj\" (UID: \"f5c147b1-21ed-4b14-b26f-c9969f0abc89\") " pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:32 crc kubenswrapper[4954]: I1128 17:28:32.247363 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5c147b1-21ed-4b14-b26f-c9969f0abc89-utilities\") pod \"redhat-marketplace-qv4wj\" (UID: \"f5c147b1-21ed-4b14-b26f-c9969f0abc89\") " pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:32 crc kubenswrapper[4954]: I1128 17:28:32.267161 4954 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-9c6b2\" (UniqueName: \"kubernetes.io/projected/f5c147b1-21ed-4b14-b26f-c9969f0abc89-kube-api-access-9c6b2\") pod \"redhat-marketplace-qv4wj\" (UID: \"f5c147b1-21ed-4b14-b26f-c9969f0abc89\") " pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:32 crc kubenswrapper[4954]: I1128 17:28:32.272510 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:32 crc kubenswrapper[4954]: I1128 17:28:32.512515 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qv4wj"] Nov 28 17:28:33 crc kubenswrapper[4954]: I1128 17:28:33.248191 4954 generic.go:334] "Generic (PLEG): container finished" podID="f5c147b1-21ed-4b14-b26f-c9969f0abc89" containerID="37b249e0703e9ce234c41a87d4f8f8955a18a046dad42be7a47912457d8a06e4" exitCode=0 Nov 28 17:28:33 crc kubenswrapper[4954]: I1128 17:28:33.248387 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qv4wj" event={"ID":"f5c147b1-21ed-4b14-b26f-c9969f0abc89","Type":"ContainerDied","Data":"37b249e0703e9ce234c41a87d4f8f8955a18a046dad42be7a47912457d8a06e4"} Nov 28 17:28:33 crc kubenswrapper[4954]: I1128 17:28:33.248607 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qv4wj" event={"ID":"f5c147b1-21ed-4b14-b26f-c9969f0abc89","Type":"ContainerStarted","Data":"851ced595e3696c38d5e12344f0b2fa1f76847ea3a3117a9f07a2d9808a488cd"} Nov 28 17:28:33 crc kubenswrapper[4954]: I1128 17:28:33.251300 4954 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:28:33 crc kubenswrapper[4954]: I1128 17:28:33.872565 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:28:33 crc kubenswrapper[4954]: E1128 17:28:33.872852 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:28:35 crc kubenswrapper[4954]: I1128 17:28:35.270059 4954 generic.go:334] "Generic (PLEG): container finished" podID="f5c147b1-21ed-4b14-b26f-c9969f0abc89" containerID="16861e49b6b6ee2c7aff6534e7f99ac610b71154939faea1d1dc70997f090dfc" exitCode=0 Nov 28 17:28:35 crc kubenswrapper[4954]: I1128 17:28:35.270124 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qv4wj" event={"ID":"f5c147b1-21ed-4b14-b26f-c9969f0abc89","Type":"ContainerDied","Data":"16861e49b6b6ee2c7aff6534e7f99ac610b71154939faea1d1dc70997f090dfc"} Nov 28 17:28:36 crc kubenswrapper[4954]: I1128 17:28:36.279342 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qv4wj" event={"ID":"f5c147b1-21ed-4b14-b26f-c9969f0abc89","Type":"ContainerStarted","Data":"94c5fe049cb8ffac10de18a8c165941e398da172f5ee3a087f81298467982579"} Nov 28 17:28:36 crc kubenswrapper[4954]: I1128 17:28:36.300263 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qv4wj" podStartSLOduration=2.6318588529999998 podStartE2EDuration="5.30024455s" 
podCreationTimestamp="2025-11-28 17:28:31 +0000 UTC" firstStartedPulling="2025-11-28 17:28:33.251031244 +0000 UTC m=+4666.642699785" lastFinishedPulling="2025-11-28 17:28:35.919416941 +0000 UTC m=+4669.311085482" observedRunningTime="2025-11-28 17:28:36.297015778 +0000 UTC m=+4669.688684319" watchObservedRunningTime="2025-11-28 17:28:36.30024455 +0000 UTC m=+4669.691913091" Nov 28 17:28:42 crc kubenswrapper[4954]: I1128 17:28:42.273068 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:42 crc kubenswrapper[4954]: I1128 17:28:42.273629 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:42 crc kubenswrapper[4954]: I1128 17:28:42.318653 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:42 crc kubenswrapper[4954]: I1128 17:28:42.358304 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:42 crc kubenswrapper[4954]: I1128 17:28:42.554798 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qv4wj"] Nov 28 17:28:44 crc kubenswrapper[4954]: I1128 17:28:44.336774 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qv4wj" podUID="f5c147b1-21ed-4b14-b26f-c9969f0abc89" containerName="registry-server" containerID="cri-o://94c5fe049cb8ffac10de18a8c165941e398da172f5ee3a087f81298467982579" gracePeriod=2 Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.326431 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.359737 4954 generic.go:334] "Generic (PLEG): container finished" podID="f5c147b1-21ed-4b14-b26f-c9969f0abc89" containerID="94c5fe049cb8ffac10de18a8c165941e398da172f5ee3a087f81298467982579" exitCode=0 Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.359862 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qv4wj" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.360230 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qv4wj" event={"ID":"f5c147b1-21ed-4b14-b26f-c9969f0abc89","Type":"ContainerDied","Data":"94c5fe049cb8ffac10de18a8c165941e398da172f5ee3a087f81298467982579"} Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.360258 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qv4wj" event={"ID":"f5c147b1-21ed-4b14-b26f-c9969f0abc89","Type":"ContainerDied","Data":"851ced595e3696c38d5e12344f0b2fa1f76847ea3a3117a9f07a2d9808a488cd"} Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.360274 4954 scope.go:117] "RemoveContainer" containerID="94c5fe049cb8ffac10de18a8c165941e398da172f5ee3a087f81298467982579" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.380451 4954 scope.go:117] "RemoveContainer" containerID="16861e49b6b6ee2c7aff6534e7f99ac610b71154939faea1d1dc70997f090dfc" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.401107 4954 scope.go:117] "RemoveContainer" containerID="37b249e0703e9ce234c41a87d4f8f8955a18a046dad42be7a47912457d8a06e4" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.427177 4954 scope.go:117] "RemoveContainer" containerID="94c5fe049cb8ffac10de18a8c165941e398da172f5ee3a087f81298467982579" Nov 28 17:28:45 crc kubenswrapper[4954]: E1128 17:28:45.427570 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94c5fe049cb8ffac10de18a8c165941e398da172f5ee3a087f81298467982579\": container with ID starting with 94c5fe049cb8ffac10de18a8c165941e398da172f5ee3a087f81298467982579 not found: ID does not exist" containerID="94c5fe049cb8ffac10de18a8c165941e398da172f5ee3a087f81298467982579" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.427607 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94c5fe049cb8ffac10de18a8c165941e398da172f5ee3a087f81298467982579"} err="failed to get container status \"94c5fe049cb8ffac10de18a8c165941e398da172f5ee3a087f81298467982579\": rpc error: code = NotFound desc = could not find container \"94c5fe049cb8ffac10de18a8c165941e398da172f5ee3a087f81298467982579\": container with ID starting with 94c5fe049cb8ffac10de18a8c165941e398da172f5ee3a087f81298467982579 not found: ID does not exist" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.427630 4954 scope.go:117] "RemoveContainer" containerID="16861e49b6b6ee2c7aff6534e7f99ac610b71154939faea1d1dc70997f090dfc" Nov 28 17:28:45 crc kubenswrapper[4954]: E1128 17:28:45.427887 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16861e49b6b6ee2c7aff6534e7f99ac610b71154939faea1d1dc70997f090dfc\": container with ID starting with 16861e49b6b6ee2c7aff6534e7f99ac610b71154939faea1d1dc70997f090dfc not found: ID does not exist" containerID="16861e49b6b6ee2c7aff6534e7f99ac610b71154939faea1d1dc70997f090dfc" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.427912 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16861e49b6b6ee2c7aff6534e7f99ac610b71154939faea1d1dc70997f090dfc"} err="failed to get container status \"16861e49b6b6ee2c7aff6534e7f99ac610b71154939faea1d1dc70997f090dfc\": rpc error: code = NotFound desc = could not find container 
\"16861e49b6b6ee2c7aff6534e7f99ac610b71154939faea1d1dc70997f090dfc\": container with ID starting with 16861e49b6b6ee2c7aff6534e7f99ac610b71154939faea1d1dc70997f090dfc not found: ID does not exist" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.427928 4954 scope.go:117] "RemoveContainer" containerID="37b249e0703e9ce234c41a87d4f8f8955a18a046dad42be7a47912457d8a06e4" Nov 28 17:28:45 crc kubenswrapper[4954]: E1128 17:28:45.428156 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37b249e0703e9ce234c41a87d4f8f8955a18a046dad42be7a47912457d8a06e4\": container with ID starting with 37b249e0703e9ce234c41a87d4f8f8955a18a046dad42be7a47912457d8a06e4 not found: ID does not exist" containerID="37b249e0703e9ce234c41a87d4f8f8955a18a046dad42be7a47912457d8a06e4" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.428173 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37b249e0703e9ce234c41a87d4f8f8955a18a046dad42be7a47912457d8a06e4"} err="failed to get container status \"37b249e0703e9ce234c41a87d4f8f8955a18a046dad42be7a47912457d8a06e4\": rpc error: code = NotFound desc = could not find container \"37b249e0703e9ce234c41a87d4f8f8955a18a046dad42be7a47912457d8a06e4\": container with ID starting with 37b249e0703e9ce234c41a87d4f8f8955a18a046dad42be7a47912457d8a06e4 not found: ID does not exist" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.477062 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5c147b1-21ed-4b14-b26f-c9969f0abc89-utilities\") pod \"f5c147b1-21ed-4b14-b26f-c9969f0abc89\" (UID: \"f5c147b1-21ed-4b14-b26f-c9969f0abc89\") " Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.477260 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9c6b2\" (UniqueName: \"kubernetes.io/projected/f5c147b1-21ed-4b14-b26f-c9969f0abc89-kube-api-access-9c6b2\") pod \"f5c147b1-21ed-4b14-b26f-c9969f0abc89\" (UID: \"f5c147b1-21ed-4b14-b26f-c9969f0abc89\") " Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.477303 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5c147b1-21ed-4b14-b26f-c9969f0abc89-catalog-content\") pod \"f5c147b1-21ed-4b14-b26f-c9969f0abc89\" (UID: \"f5c147b1-21ed-4b14-b26f-c9969f0abc89\") " Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.478719 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5c147b1-21ed-4b14-b26f-c9969f0abc89-utilities" (OuterVolumeSpecName: "utilities") pod "f5c147b1-21ed-4b14-b26f-c9969f0abc89" (UID: "f5c147b1-21ed-4b14-b26f-c9969f0abc89"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.482759 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5c147b1-21ed-4b14-b26f-c9969f0abc89-kube-api-access-9c6b2" (OuterVolumeSpecName: "kube-api-access-9c6b2") pod "f5c147b1-21ed-4b14-b26f-c9969f0abc89" (UID: "f5c147b1-21ed-4b14-b26f-c9969f0abc89"). InnerVolumeSpecName "kube-api-access-9c6b2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.498898 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5c147b1-21ed-4b14-b26f-c9969f0abc89-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f5c147b1-21ed-4b14-b26f-c9969f0abc89" (UID: "f5c147b1-21ed-4b14-b26f-c9969f0abc89"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.578352 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9c6b2\" (UniqueName: \"kubernetes.io/projected/f5c147b1-21ed-4b14-b26f-c9969f0abc89-kube-api-access-9c6b2\") on node \"crc\" DevicePath \"\"" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.578388 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5c147b1-21ed-4b14-b26f-c9969f0abc89-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.578400 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5c147b1-21ed-4b14-b26f-c9969f0abc89-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.689782 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qv4wj"] Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.698620 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qv4wj"] Nov 28 17:28:45 crc kubenswrapper[4954]: I1128 17:28:45.864557 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5c147b1-21ed-4b14-b26f-c9969f0abc89" path="/var/lib/kubelet/pods/f5c147b1-21ed-4b14-b26f-c9969f0abc89/volumes" Nov 28 17:28:47 crc kubenswrapper[4954]: I1128 17:28:47.860894 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:28:47 crc kubenswrapper[4954]: E1128 17:28:47.861846 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:28:59 crc kubenswrapper[4954]: I1128 17:28:59.856606 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:28:59 crc kubenswrapper[4954]: E1128 17:28:59.857653 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:29:14 crc kubenswrapper[4954]: I1128 17:29:14.856246 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:29:14 crc kubenswrapper[4954]: E1128 17:29:14.857111 4954 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:29:29 crc kubenswrapper[4954]: I1128 17:29:29.856900 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:29:29 crc kubenswrapper[4954]: E1128 17:29:29.857679 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:29:41 crc kubenswrapper[4954]: I1128 17:29:41.856459 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:29:42 crc kubenswrapper[4954]: I1128 17:29:42.781702 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"9d38eb7f33f90e48d53b26448b4f83258a9ea24c8166e6f7984877f8f9b54aff"} Nov 28 17:29:49 crc kubenswrapper[4954]: I1128 17:29:49.769616 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9mclb"] Nov 28 17:29:49 crc kubenswrapper[4954]: E1128 17:29:49.770902 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5c147b1-21ed-4b14-b26f-c9969f0abc89" containerName="extract-content" Nov 28 17:29:49 crc kubenswrapper[4954]: I1128 17:29:49.770927 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5c147b1-21ed-4b14-b26f-c9969f0abc89" containerName="extract-content" Nov 28 17:29:49 crc kubenswrapper[4954]: E1128 17:29:49.770946 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5c147b1-21ed-4b14-b26f-c9969f0abc89" containerName="extract-utilities" Nov 28 17:29:49 crc kubenswrapper[4954]: I1128 17:29:49.770960 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5c147b1-21ed-4b14-b26f-c9969f0abc89" containerName="extract-utilities" Nov 28 17:29:49 crc kubenswrapper[4954]: E1128 17:29:49.770994 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5c147b1-21ed-4b14-b26f-c9969f0abc89" containerName="registry-server" Nov 28 17:29:49 crc kubenswrapper[4954]: I1128 17:29:49.771006 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5c147b1-21ed-4b14-b26f-c9969f0abc89" containerName="registry-server" Nov 28 17:29:49 crc kubenswrapper[4954]: I1128 17:29:49.771258 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5c147b1-21ed-4b14-b26f-c9969f0abc89" containerName="registry-server" Nov 28 17:29:49 crc kubenswrapper[4954]: I1128 17:29:49.773285 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:29:49 crc kubenswrapper[4954]: I1128 17:29:49.779943 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9mclb"] Nov 28 17:29:49 crc kubenswrapper[4954]: I1128 17:29:49.888108 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpzm2\" (UniqueName: \"kubernetes.io/projected/fd9a6132-ac43-460c-abaa-a3412bb463b0-kube-api-access-gpzm2\") pod \"certified-operators-9mclb\" (UID: \"fd9a6132-ac43-460c-abaa-a3412bb463b0\") " pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:29:49 crc kubenswrapper[4954]: I1128 17:29:49.888165 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd9a6132-ac43-460c-abaa-a3412bb463b0-catalog-content\") pod \"certified-operators-9mclb\" (UID: \"fd9a6132-ac43-460c-abaa-a3412bb463b0\") " pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:29:49 crc kubenswrapper[4954]: I1128 17:29:49.888256 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd9a6132-ac43-460c-abaa-a3412bb463b0-utilities\") pod \"certified-operators-9mclb\" (UID: \"fd9a6132-ac43-460c-abaa-a3412bb463b0\") " pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:29:49 crc kubenswrapper[4954]: I1128 17:29:49.988916 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd9a6132-ac43-460c-abaa-a3412bb463b0-utilities\") pod \"certified-operators-9mclb\" (UID: \"fd9a6132-ac43-460c-abaa-a3412bb463b0\") " pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:29:49 crc kubenswrapper[4954]: I1128 17:29:49.989010 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpzm2\" (UniqueName: \"kubernetes.io/projected/fd9a6132-ac43-460c-abaa-a3412bb463b0-kube-api-access-gpzm2\") pod \"certified-operators-9mclb\" (UID: \"fd9a6132-ac43-460c-abaa-a3412bb463b0\") " pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:29:49 crc kubenswrapper[4954]: I1128 17:29:49.989044 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd9a6132-ac43-460c-abaa-a3412bb463b0-catalog-content\") pod \"certified-operators-9mclb\" (UID: \"fd9a6132-ac43-460c-abaa-a3412bb463b0\") " pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:29:49 crc kubenswrapper[4954]: I1128 17:29:49.989393 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd9a6132-ac43-460c-abaa-a3412bb463b0-utilities\") pod \"certified-operators-9mclb\" (UID: \"fd9a6132-ac43-460c-abaa-a3412bb463b0\") " pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:29:49 crc kubenswrapper[4954]: I1128 17:29:49.989914 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd9a6132-ac43-460c-abaa-a3412bb463b0-catalog-content\") pod \"certified-operators-9mclb\" (UID: \"fd9a6132-ac43-460c-abaa-a3412bb463b0\") " pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:29:50 crc kubenswrapper[4954]: I1128 17:29:50.012347 4954 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gpzm2\" (UniqueName: \"kubernetes.io/projected/fd9a6132-ac43-460c-abaa-a3412bb463b0-kube-api-access-gpzm2\") pod \"certified-operators-9mclb\" (UID: \"fd9a6132-ac43-460c-abaa-a3412bb463b0\") " pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:29:50 crc kubenswrapper[4954]: I1128 17:29:50.102577 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:29:50 crc kubenswrapper[4954]: I1128 17:29:50.613684 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9mclb"] Nov 28 17:29:50 crc kubenswrapper[4954]: I1128 17:29:50.836623 4954 generic.go:334] "Generic (PLEG): container finished" podID="fd9a6132-ac43-460c-abaa-a3412bb463b0" containerID="8c77ffb81dc99212fc0f7a1c4424729a14e85cb436ed6e5538fc629ed95c3098" exitCode=0 Nov 28 17:29:50 crc kubenswrapper[4954]: I1128 17:29:50.836700 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9mclb" event={"ID":"fd9a6132-ac43-460c-abaa-a3412bb463b0","Type":"ContainerDied","Data":"8c77ffb81dc99212fc0f7a1c4424729a14e85cb436ed6e5538fc629ed95c3098"} Nov 28 17:29:50 crc kubenswrapper[4954]: I1128 17:29:50.837293 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9mclb" event={"ID":"fd9a6132-ac43-460c-abaa-a3412bb463b0","Type":"ContainerStarted","Data":"cd3e5d3fe4a47e717a907af055765da3061236c6e2865753f4043deb26e70e80"} Nov 28 17:29:51 crc kubenswrapper[4954]: I1128 17:29:51.851262 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9mclb" event={"ID":"fd9a6132-ac43-460c-abaa-a3412bb463b0","Type":"ContainerStarted","Data":"339aad611f91a2a2661a206e8494990581ee8b57de125c5f8db5d6751ddc77e9"} Nov 28 17:29:52 crc kubenswrapper[4954]: I1128 17:29:52.862662 4954 generic.go:334] "Generic (PLEG): container finished" podID="fd9a6132-ac43-460c-abaa-a3412bb463b0" containerID="339aad611f91a2a2661a206e8494990581ee8b57de125c5f8db5d6751ddc77e9" exitCode=0 Nov 28 17:29:52 crc kubenswrapper[4954]: I1128 17:29:52.862707 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9mclb" event={"ID":"fd9a6132-ac43-460c-abaa-a3412bb463b0","Type":"ContainerDied","Data":"339aad611f91a2a2661a206e8494990581ee8b57de125c5f8db5d6751ddc77e9"} Nov 28 17:29:53 crc kubenswrapper[4954]: I1128 17:29:53.874392 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9mclb" event={"ID":"fd9a6132-ac43-460c-abaa-a3412bb463b0","Type":"ContainerStarted","Data":"021a610585251336d91ce615696ad63c9d23a74254c6bdc36f97b672780e6859"} Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.103816 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.104492 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.151503 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.152568 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/certified-operators-9mclb" podStartSLOduration=8.379696261 podStartE2EDuration="11.152555398s" podCreationTimestamp="2025-11-28 17:29:49 +0000 UTC" firstStartedPulling="2025-11-28 17:29:50.838237374 +0000 UTC m=+4744.229905915" lastFinishedPulling="2025-11-28 17:29:53.611096501 +0000 UTC m=+4747.002765052" observedRunningTime="2025-11-28 17:29:53.89598178 +0000 UTC m=+4747.287650321" watchObservedRunningTime="2025-11-28 17:30:00.152555398 +0000 UTC m=+4753.544223939" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.152981 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v"] Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.153952 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.156367 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.156378 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.163748 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v"] Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.350808 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/861f3e46-6a29-458a-8c7d-50f3f00ec98b-secret-volume\") pod \"collect-profiles-29405850-hgq2v\" (UID: \"861f3e46-6a29-458a-8c7d-50f3f00ec98b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.350939 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vstp\" (UniqueName: \"kubernetes.io/projected/861f3e46-6a29-458a-8c7d-50f3f00ec98b-kube-api-access-2vstp\") pod \"collect-profiles-29405850-hgq2v\" (UID: \"861f3e46-6a29-458a-8c7d-50f3f00ec98b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.351025 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/861f3e46-6a29-458a-8c7d-50f3f00ec98b-config-volume\") pod \"collect-profiles-29405850-hgq2v\" (UID: \"861f3e46-6a29-458a-8c7d-50f3f00ec98b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.452071 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/861f3e46-6a29-458a-8c7d-50f3f00ec98b-secret-volume\") pod \"collect-profiles-29405850-hgq2v\" (UID: \"861f3e46-6a29-458a-8c7d-50f3f00ec98b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.452167 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vstp\" (UniqueName: \"kubernetes.io/projected/861f3e46-6a29-458a-8c7d-50f3f00ec98b-kube-api-access-2vstp\") pod 
\"collect-profiles-29405850-hgq2v\" (UID: \"861f3e46-6a29-458a-8c7d-50f3f00ec98b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.452253 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/861f3e46-6a29-458a-8c7d-50f3f00ec98b-config-volume\") pod \"collect-profiles-29405850-hgq2v\" (UID: \"861f3e46-6a29-458a-8c7d-50f3f00ec98b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.453314 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/861f3e46-6a29-458a-8c7d-50f3f00ec98b-config-volume\") pod \"collect-profiles-29405850-hgq2v\" (UID: \"861f3e46-6a29-458a-8c7d-50f3f00ec98b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.462212 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/861f3e46-6a29-458a-8c7d-50f3f00ec98b-secret-volume\") pod \"collect-profiles-29405850-hgq2v\" (UID: \"861f3e46-6a29-458a-8c7d-50f3f00ec98b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.470305 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vstp\" (UniqueName: \"kubernetes.io/projected/861f3e46-6a29-458a-8c7d-50f3f00ec98b-kube-api-access-2vstp\") pod \"collect-profiles-29405850-hgq2v\" (UID: \"861f3e46-6a29-458a-8c7d-50f3f00ec98b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.475637 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.893940 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v"] Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.920707 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" event={"ID":"861f3e46-6a29-458a-8c7d-50f3f00ec98b","Type":"ContainerStarted","Data":"7184bda43beae4873695d4ebfb6ad3d0be3974a2c9f8f1bee06aff5a31e594ae"} Nov 28 17:30:00 crc kubenswrapper[4954]: I1128 17:30:00.963308 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:30:01 crc kubenswrapper[4954]: I1128 17:30:01.014992 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9mclb"] Nov 28 17:30:01 crc kubenswrapper[4954]: I1128 17:30:01.927897 4954 generic.go:334] "Generic (PLEG): container finished" podID="861f3e46-6a29-458a-8c7d-50f3f00ec98b" containerID="513617cf663e8341020002a5f0793b4e0c67e61549e3a2b3458fb581390b741c" exitCode=0 Nov 28 17:30:01 crc kubenswrapper[4954]: I1128 17:30:01.928029 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" event={"ID":"861f3e46-6a29-458a-8c7d-50f3f00ec98b","Type":"ContainerDied","Data":"513617cf663e8341020002a5f0793b4e0c67e61549e3a2b3458fb581390b741c"} Nov 28 17:30:02 crc kubenswrapper[4954]: I1128 17:30:02.935720 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9mclb" podUID="fd9a6132-ac43-460c-abaa-a3412bb463b0" containerName="registry-server" containerID="cri-o://021a610585251336d91ce615696ad63c9d23a74254c6bdc36f97b672780e6859" gracePeriod=2 Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.190091 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.297308 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/861f3e46-6a29-458a-8c7d-50f3f00ec98b-config-volume\") pod \"861f3e46-6a29-458a-8c7d-50f3f00ec98b\" (UID: \"861f3e46-6a29-458a-8c7d-50f3f00ec98b\") " Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.297390 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/861f3e46-6a29-458a-8c7d-50f3f00ec98b-secret-volume\") pod \"861f3e46-6a29-458a-8c7d-50f3f00ec98b\" (UID: \"861f3e46-6a29-458a-8c7d-50f3f00ec98b\") " Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.297457 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vstp\" (UniqueName: \"kubernetes.io/projected/861f3e46-6a29-458a-8c7d-50f3f00ec98b-kube-api-access-2vstp\") pod \"861f3e46-6a29-458a-8c7d-50f3f00ec98b\" (UID: \"861f3e46-6a29-458a-8c7d-50f3f00ec98b\") " Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.299305 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/861f3e46-6a29-458a-8c7d-50f3f00ec98b-config-volume" (OuterVolumeSpecName: "config-volume") pod "861f3e46-6a29-458a-8c7d-50f3f00ec98b" (UID: "861f3e46-6a29-458a-8c7d-50f3f00ec98b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.305444 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/861f3e46-6a29-458a-8c7d-50f3f00ec98b-kube-api-access-2vstp" (OuterVolumeSpecName: "kube-api-access-2vstp") pod "861f3e46-6a29-458a-8c7d-50f3f00ec98b" (UID: "861f3e46-6a29-458a-8c7d-50f3f00ec98b"). InnerVolumeSpecName "kube-api-access-2vstp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.305599 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/861f3e46-6a29-458a-8c7d-50f3f00ec98b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "861f3e46-6a29-458a-8c7d-50f3f00ec98b" (UID: "861f3e46-6a29-458a-8c7d-50f3f00ec98b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.340346 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.399009 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gpzm2\" (UniqueName: \"kubernetes.io/projected/fd9a6132-ac43-460c-abaa-a3412bb463b0-kube-api-access-gpzm2\") pod \"fd9a6132-ac43-460c-abaa-a3412bb463b0\" (UID: \"fd9a6132-ac43-460c-abaa-a3412bb463b0\") " Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.399158 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd9a6132-ac43-460c-abaa-a3412bb463b0-catalog-content\") pod \"fd9a6132-ac43-460c-abaa-a3412bb463b0\" (UID: \"fd9a6132-ac43-460c-abaa-a3412bb463b0\") " Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.399229 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd9a6132-ac43-460c-abaa-a3412bb463b0-utilities\") pod \"fd9a6132-ac43-460c-abaa-a3412bb463b0\" (UID: \"fd9a6132-ac43-460c-abaa-a3412bb463b0\") " Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.399553 4954 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/861f3e46-6a29-458a-8c7d-50f3f00ec98b-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.399573 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vstp\" (UniqueName: \"kubernetes.io/projected/861f3e46-6a29-458a-8c7d-50f3f00ec98b-kube-api-access-2vstp\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.399583 4954 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/861f3e46-6a29-458a-8c7d-50f3f00ec98b-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.400340 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd9a6132-ac43-460c-abaa-a3412bb463b0-utilities" (OuterVolumeSpecName: "utilities") pod "fd9a6132-ac43-460c-abaa-a3412bb463b0" (UID: "fd9a6132-ac43-460c-abaa-a3412bb463b0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.402954 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd9a6132-ac43-460c-abaa-a3412bb463b0-kube-api-access-gpzm2" (OuterVolumeSpecName: "kube-api-access-gpzm2") pod "fd9a6132-ac43-460c-abaa-a3412bb463b0" (UID: "fd9a6132-ac43-460c-abaa-a3412bb463b0"). InnerVolumeSpecName "kube-api-access-gpzm2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.450885 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd9a6132-ac43-460c-abaa-a3412bb463b0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fd9a6132-ac43-460c-abaa-a3412bb463b0" (UID: "fd9a6132-ac43-460c-abaa-a3412bb463b0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.499987 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd9a6132-ac43-460c-abaa-a3412bb463b0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.500050 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd9a6132-ac43-460c-abaa-a3412bb463b0-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.500063 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gpzm2\" (UniqueName: \"kubernetes.io/projected/fd9a6132-ac43-460c-abaa-a3412bb463b0-kube-api-access-gpzm2\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.945462 4954 generic.go:334] "Generic (PLEG): container finished" podID="fd9a6132-ac43-460c-abaa-a3412bb463b0" containerID="021a610585251336d91ce615696ad63c9d23a74254c6bdc36f97b672780e6859" exitCode=0 Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.945508 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9mclb" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.945592 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9mclb" event={"ID":"fd9a6132-ac43-460c-abaa-a3412bb463b0","Type":"ContainerDied","Data":"021a610585251336d91ce615696ad63c9d23a74254c6bdc36f97b672780e6859"} Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.946383 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9mclb" event={"ID":"fd9a6132-ac43-460c-abaa-a3412bb463b0","Type":"ContainerDied","Data":"cd3e5d3fe4a47e717a907af055765da3061236c6e2865753f4043deb26e70e80"} Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.946436 4954 scope.go:117] "RemoveContainer" containerID="021a610585251336d91ce615696ad63c9d23a74254c6bdc36f97b672780e6859" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.949349 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" event={"ID":"861f3e46-6a29-458a-8c7d-50f3f00ec98b","Type":"ContainerDied","Data":"7184bda43beae4873695d4ebfb6ad3d0be3974a2c9f8f1bee06aff5a31e594ae"} Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.949397 4954 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7184bda43beae4873695d4ebfb6ad3d0be3974a2c9f8f1bee06aff5a31e594ae" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.949522 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-hgq2v" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.977661 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9mclb"] Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.978598 4954 scope.go:117] "RemoveContainer" containerID="339aad611f91a2a2661a206e8494990581ee8b57de125c5f8db5d6751ddc77e9" Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.984331 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9mclb"] Nov 28 17:30:03 crc kubenswrapper[4954]: I1128 17:30:03.998013 4954 scope.go:117] "RemoveContainer" containerID="8c77ffb81dc99212fc0f7a1c4424729a14e85cb436ed6e5538fc629ed95c3098" Nov 28 17:30:04 crc kubenswrapper[4954]: I1128 17:30:04.016450 4954 scope.go:117] "RemoveContainer" containerID="021a610585251336d91ce615696ad63c9d23a74254c6bdc36f97b672780e6859" Nov 28 17:30:04 crc kubenswrapper[4954]: E1128 17:30:04.017185 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"021a610585251336d91ce615696ad63c9d23a74254c6bdc36f97b672780e6859\": container with ID starting with 021a610585251336d91ce615696ad63c9d23a74254c6bdc36f97b672780e6859 not found: ID does not exist" containerID="021a610585251336d91ce615696ad63c9d23a74254c6bdc36f97b672780e6859" Nov 28 17:30:04 crc kubenswrapper[4954]: I1128 17:30:04.017228 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"021a610585251336d91ce615696ad63c9d23a74254c6bdc36f97b672780e6859"} err="failed to get container status \"021a610585251336d91ce615696ad63c9d23a74254c6bdc36f97b672780e6859\": rpc error: code = NotFound desc = could not find container \"021a610585251336d91ce615696ad63c9d23a74254c6bdc36f97b672780e6859\": container with ID starting with 021a610585251336d91ce615696ad63c9d23a74254c6bdc36f97b672780e6859 not found: ID does not exist" Nov 28 17:30:04 crc kubenswrapper[4954]: I1128 17:30:04.017256 4954 scope.go:117] "RemoveContainer" containerID="339aad611f91a2a2661a206e8494990581ee8b57de125c5f8db5d6751ddc77e9" Nov 28 17:30:04 crc kubenswrapper[4954]: E1128 17:30:04.017580 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"339aad611f91a2a2661a206e8494990581ee8b57de125c5f8db5d6751ddc77e9\": container with ID starting with 339aad611f91a2a2661a206e8494990581ee8b57de125c5f8db5d6751ddc77e9 not found: ID does not exist" containerID="339aad611f91a2a2661a206e8494990581ee8b57de125c5f8db5d6751ddc77e9" Nov 28 17:30:04 crc kubenswrapper[4954]: I1128 17:30:04.017603 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"339aad611f91a2a2661a206e8494990581ee8b57de125c5f8db5d6751ddc77e9"} err="failed to get container status \"339aad611f91a2a2661a206e8494990581ee8b57de125c5f8db5d6751ddc77e9\": rpc error: code = NotFound desc = could not find container \"339aad611f91a2a2661a206e8494990581ee8b57de125c5f8db5d6751ddc77e9\": container with ID starting with 339aad611f91a2a2661a206e8494990581ee8b57de125c5f8db5d6751ddc77e9 not found: ID does not exist" Nov 28 17:30:04 crc kubenswrapper[4954]: I1128 17:30:04.017615 4954 scope.go:117] "RemoveContainer" containerID="8c77ffb81dc99212fc0f7a1c4424729a14e85cb436ed6e5538fc629ed95c3098" Nov 28 17:30:04 crc kubenswrapper[4954]: E1128 17:30:04.017901 4954 log.go:32] "ContainerStatus 
from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c77ffb81dc99212fc0f7a1c4424729a14e85cb436ed6e5538fc629ed95c3098\": container with ID starting with 8c77ffb81dc99212fc0f7a1c4424729a14e85cb436ed6e5538fc629ed95c3098 not found: ID does not exist" containerID="8c77ffb81dc99212fc0f7a1c4424729a14e85cb436ed6e5538fc629ed95c3098" Nov 28 17:30:04 crc kubenswrapper[4954]: I1128 17:30:04.017935 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c77ffb81dc99212fc0f7a1c4424729a14e85cb436ed6e5538fc629ed95c3098"} err="failed to get container status \"8c77ffb81dc99212fc0f7a1c4424729a14e85cb436ed6e5538fc629ed95c3098\": rpc error: code = NotFound desc = could not find container \"8c77ffb81dc99212fc0f7a1c4424729a14e85cb436ed6e5538fc629ed95c3098\": container with ID starting with 8c77ffb81dc99212fc0f7a1c4424729a14e85cb436ed6e5538fc629ed95c3098 not found: ID does not exist" Nov 28 17:30:04 crc kubenswrapper[4954]: I1128 17:30:04.278788 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"] Nov 28 17:30:04 crc kubenswrapper[4954]: I1128 17:30:04.284244 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-mzb4g"] Nov 28 17:30:05 crc kubenswrapper[4954]: I1128 17:30:05.865095 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2831b507-d16f-4c5d-8197-75ca53f2867e" path="/var/lib/kubelet/pods/2831b507-d16f-4c5d-8197-75ca53f2867e/volumes" Nov 28 17:30:05 crc kubenswrapper[4954]: I1128 17:30:05.866054 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd9a6132-ac43-460c-abaa-a3412bb463b0" path="/var/lib/kubelet/pods/fd9a6132-ac43-460c-abaa-a3412bb463b0/volumes" Nov 28 17:30:06 crc kubenswrapper[4954]: I1128 17:30:06.177473 4954 scope.go:117] "RemoveContainer" containerID="6edceb02528b52f6b5506c92c459d2a23cff7512dc8475f5694bc820a6757719" Nov 28 17:32:02 crc kubenswrapper[4954]: I1128 17:32:02.480444 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:32:02 crc kubenswrapper[4954]: I1128 17:32:02.481111 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:32:32 crc kubenswrapper[4954]: I1128 17:32:32.480612 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:32:32 crc kubenswrapper[4954]: I1128 17:32:32.481489 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:33:02 crc 
kubenswrapper[4954]: I1128 17:33:02.480442 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:33:02 crc kubenswrapper[4954]: I1128 17:33:02.481110 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:33:02 crc kubenswrapper[4954]: I1128 17:33:02.481168 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 17:33:02 crc kubenswrapper[4954]: I1128 17:33:02.481919 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9d38eb7f33f90e48d53b26448b4f83258a9ea24c8166e6f7984877f8f9b54aff"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:33:02 crc kubenswrapper[4954]: I1128 17:33:02.481977 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://9d38eb7f33f90e48d53b26448b4f83258a9ea24c8166e6f7984877f8f9b54aff" gracePeriod=600 Nov 28 17:33:02 crc kubenswrapper[4954]: I1128 17:33:02.664317 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="9d38eb7f33f90e48d53b26448b4f83258a9ea24c8166e6f7984877f8f9b54aff" exitCode=0 Nov 28 17:33:02 crc kubenswrapper[4954]: I1128 17:33:02.664393 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"9d38eb7f33f90e48d53b26448b4f83258a9ea24c8166e6f7984877f8f9b54aff"} Nov 28 17:33:02 crc kubenswrapper[4954]: I1128 17:33:02.664711 4954 scope.go:117] "RemoveContainer" containerID="03f0a5c68ae9c9258e038b3d434d1593cd0244e5fbaaa7acaf1d1efb970984d1" Nov 28 17:33:03 crc kubenswrapper[4954]: I1128 17:33:03.676049 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416"} Nov 28 17:34:45 crc kubenswrapper[4954]: I1128 17:34:45.988098 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-wpdqq/must-gather-vl2l8"] Nov 28 17:34:45 crc kubenswrapper[4954]: E1128 17:34:45.988922 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="861f3e46-6a29-458a-8c7d-50f3f00ec98b" containerName="collect-profiles" Nov 28 17:34:45 crc kubenswrapper[4954]: I1128 17:34:45.988935 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="861f3e46-6a29-458a-8c7d-50f3f00ec98b" containerName="collect-profiles" Nov 28 17:34:45 crc kubenswrapper[4954]: E1128 17:34:45.988949 4954 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="fd9a6132-ac43-460c-abaa-a3412bb463b0" containerName="extract-utilities" Nov 28 17:34:45 crc kubenswrapper[4954]: I1128 17:34:45.988956 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd9a6132-ac43-460c-abaa-a3412bb463b0" containerName="extract-utilities" Nov 28 17:34:45 crc kubenswrapper[4954]: E1128 17:34:45.988973 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd9a6132-ac43-460c-abaa-a3412bb463b0" containerName="extract-content" Nov 28 17:34:45 crc kubenswrapper[4954]: I1128 17:34:45.988979 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd9a6132-ac43-460c-abaa-a3412bb463b0" containerName="extract-content" Nov 28 17:34:45 crc kubenswrapper[4954]: E1128 17:34:45.988995 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd9a6132-ac43-460c-abaa-a3412bb463b0" containerName="registry-server" Nov 28 17:34:45 crc kubenswrapper[4954]: I1128 17:34:45.989001 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd9a6132-ac43-460c-abaa-a3412bb463b0" containerName="registry-server" Nov 28 17:34:45 crc kubenswrapper[4954]: I1128 17:34:45.989131 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="861f3e46-6a29-458a-8c7d-50f3f00ec98b" containerName="collect-profiles" Nov 28 17:34:45 crc kubenswrapper[4954]: I1128 17:34:45.989149 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd9a6132-ac43-460c-abaa-a3412bb463b0" containerName="registry-server" Nov 28 17:34:45 crc kubenswrapper[4954]: I1128 17:34:45.989865 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-wpdqq/must-gather-vl2l8" Nov 28 17:34:45 crc kubenswrapper[4954]: I1128 17:34:45.992052 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-wpdqq"/"kube-root-ca.crt" Nov 28 17:34:45 crc kubenswrapper[4954]: I1128 17:34:45.994897 4954 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-wpdqq"/"default-dockercfg-bgsn8" Nov 28 17:34:45 crc kubenswrapper[4954]: I1128 17:34:45.994941 4954 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-wpdqq"/"openshift-service-ca.crt" Nov 28 17:34:46 crc kubenswrapper[4954]: I1128 17:34:46.000446 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-wpdqq/must-gather-vl2l8"] Nov 28 17:34:46 crc kubenswrapper[4954]: I1128 17:34:46.145959 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kn26t\" (UniqueName: \"kubernetes.io/projected/598386fe-f553-4dc1-b298-2b9e66befd85-kube-api-access-kn26t\") pod \"must-gather-vl2l8\" (UID: \"598386fe-f553-4dc1-b298-2b9e66befd85\") " pod="openshift-must-gather-wpdqq/must-gather-vl2l8" Nov 28 17:34:46 crc kubenswrapper[4954]: I1128 17:34:46.146020 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/598386fe-f553-4dc1-b298-2b9e66befd85-must-gather-output\") pod \"must-gather-vl2l8\" (UID: \"598386fe-f553-4dc1-b298-2b9e66befd85\") " pod="openshift-must-gather-wpdqq/must-gather-vl2l8" Nov 28 17:34:46 crc kubenswrapper[4954]: I1128 17:34:46.246931 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kn26t\" (UniqueName: \"kubernetes.io/projected/598386fe-f553-4dc1-b298-2b9e66befd85-kube-api-access-kn26t\") pod \"must-gather-vl2l8\" (UID: 
\"598386fe-f553-4dc1-b298-2b9e66befd85\") " pod="openshift-must-gather-wpdqq/must-gather-vl2l8" Nov 28 17:34:46 crc kubenswrapper[4954]: I1128 17:34:46.246996 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/598386fe-f553-4dc1-b298-2b9e66befd85-must-gather-output\") pod \"must-gather-vl2l8\" (UID: \"598386fe-f553-4dc1-b298-2b9e66befd85\") " pod="openshift-must-gather-wpdqq/must-gather-vl2l8" Nov 28 17:34:46 crc kubenswrapper[4954]: I1128 17:34:46.247461 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/598386fe-f553-4dc1-b298-2b9e66befd85-must-gather-output\") pod \"must-gather-vl2l8\" (UID: \"598386fe-f553-4dc1-b298-2b9e66befd85\") " pod="openshift-must-gather-wpdqq/must-gather-vl2l8" Nov 28 17:34:46 crc kubenswrapper[4954]: I1128 17:34:46.272983 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kn26t\" (UniqueName: \"kubernetes.io/projected/598386fe-f553-4dc1-b298-2b9e66befd85-kube-api-access-kn26t\") pod \"must-gather-vl2l8\" (UID: \"598386fe-f553-4dc1-b298-2b9e66befd85\") " pod="openshift-must-gather-wpdqq/must-gather-vl2l8" Nov 28 17:34:46 crc kubenswrapper[4954]: I1128 17:34:46.307891 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-wpdqq/must-gather-vl2l8" Nov 28 17:34:46 crc kubenswrapper[4954]: I1128 17:34:46.793875 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-wpdqq/must-gather-vl2l8"] Nov 28 17:34:46 crc kubenswrapper[4954]: I1128 17:34:46.806602 4954 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:34:47 crc kubenswrapper[4954]: I1128 17:34:47.809048 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-wpdqq/must-gather-vl2l8" event={"ID":"598386fe-f553-4dc1-b298-2b9e66befd85","Type":"ContainerStarted","Data":"3088eb9a181332deaa6009d66a02e6bab882210c51a4dfae625e910f43d4e135"} Nov 28 17:34:51 crc kubenswrapper[4954]: I1128 17:34:51.838605 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-wpdqq/must-gather-vl2l8" event={"ID":"598386fe-f553-4dc1-b298-2b9e66befd85","Type":"ContainerStarted","Data":"ef2a85ad33cb23d216bdcf8f61c01a32fbf5b79355f90adb7b29a5afd4730da0"} Nov 28 17:34:51 crc kubenswrapper[4954]: I1128 17:34:51.839168 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-wpdqq/must-gather-vl2l8" event={"ID":"598386fe-f553-4dc1-b298-2b9e66befd85","Type":"ContainerStarted","Data":"0a6854db7b44f6392b9e70e197951101c1a1d1d3225ec33a32bdfc6968cd36bb"} Nov 28 17:34:51 crc kubenswrapper[4954]: I1128 17:34:51.854579 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-wpdqq/must-gather-vl2l8" podStartSLOduration=2.504422261 podStartE2EDuration="6.85456059s" podCreationTimestamp="2025-11-28 17:34:45 +0000 UTC" firstStartedPulling="2025-11-28 17:34:46.806302179 +0000 UTC m=+5040.197970730" lastFinishedPulling="2025-11-28 17:34:51.156440518 +0000 UTC m=+5044.548109059" observedRunningTime="2025-11-28 17:34:51.85141508 +0000 UTC m=+5045.243083611" watchObservedRunningTime="2025-11-28 17:34:51.85456059 +0000 UTC m=+5045.246229131" Nov 28 17:35:02 crc kubenswrapper[4954]: I1128 17:35:02.480879 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:35:02 crc kubenswrapper[4954]: I1128 17:35:02.481660 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:35:32 crc kubenswrapper[4954]: I1128 17:35:32.480816 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:35:32 crc kubenswrapper[4954]: I1128 17:35:32.481489 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:35:46 crc kubenswrapper[4954]: I1128 17:35:46.451328 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc_d76bc42a-993b-4748-a566-a5efb43bf20a/util/0.log" Nov 28 17:35:46 crc kubenswrapper[4954]: I1128 17:35:46.649091 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc_d76bc42a-993b-4748-a566-a5efb43bf20a/util/0.log" Nov 28 17:35:46 crc kubenswrapper[4954]: I1128 17:35:46.703074 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc_d76bc42a-993b-4748-a566-a5efb43bf20a/pull/0.log" Nov 28 17:35:46 crc kubenswrapper[4954]: I1128 17:35:46.712367 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc_d76bc42a-993b-4748-a566-a5efb43bf20a/pull/0.log" Nov 28 17:35:46 crc kubenswrapper[4954]: I1128 17:35:46.813649 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc_d76bc42a-993b-4748-a566-a5efb43bf20a/util/0.log" Nov 28 17:35:46 crc kubenswrapper[4954]: I1128 17:35:46.831251 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc_d76bc42a-993b-4748-a566-a5efb43bf20a/pull/0.log" Nov 28 17:35:46 crc kubenswrapper[4954]: I1128 17:35:46.917739 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_170303e37f43f7d4fb9c01e6e287a976e4e0e80cbddd20eca5c3983f88gkvnc_d76bc42a-993b-4748-a566-a5efb43bf20a/extract/0.log" Nov 28 17:35:46 crc kubenswrapper[4954]: I1128 17:35:46.979460 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-vd2qv_273ea62c-4545-4d8e-b831-ad40b8413f6d/kube-rbac-proxy/0.log" Nov 28 17:35:47 crc kubenswrapper[4954]: I1128 17:35:47.090003 4954 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-vd2qv_273ea62c-4545-4d8e-b831-ad40b8413f6d/manager/0.log" Nov 28 17:35:47 crc kubenswrapper[4954]: I1128 17:35:47.145953 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-dxqks_22df58cc-9b57-40fd-a1fd-6dfb287daea1/kube-rbac-proxy/0.log" Nov 28 17:35:47 crc kubenswrapper[4954]: I1128 17:35:47.237557 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-dxqks_22df58cc-9b57-40fd-a1fd-6dfb287daea1/manager/0.log" Nov 28 17:35:47 crc kubenswrapper[4954]: I1128 17:35:47.309047 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-frclp_2a678943-ab1c-46a3-89d9-03cec5259ecc/kube-rbac-proxy/0.log" Nov 28 17:35:47 crc kubenswrapper[4954]: I1128 17:35:47.341034 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-frclp_2a678943-ab1c-46a3-89d9-03cec5259ecc/manager/0.log" Nov 28 17:35:47 crc kubenswrapper[4954]: I1128 17:35:47.478783 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-668d9c48b9-jpsg7_8be7ca71-bdf4-42ca-8593-f5ad8ae1404c/kube-rbac-proxy/0.log" Nov 28 17:35:47 crc kubenswrapper[4954]: I1128 17:35:47.590823 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-668d9c48b9-jpsg7_8be7ca71-bdf4-42ca-8593-f5ad8ae1404c/manager/0.log" Nov 28 17:35:47 crc kubenswrapper[4954]: I1128 17:35:47.697790 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-kvcbm_1aff6577-6109-4234-8300-62afe871125b/manager/0.log" Nov 28 17:35:47 crc kubenswrapper[4954]: I1128 17:35:47.749466 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-kvcbm_1aff6577-6109-4234-8300-62afe871125b/kube-rbac-proxy/0.log" Nov 28 17:35:47 crc kubenswrapper[4954]: I1128 17:35:47.763072 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-xl989_3c9ab6ab-bcb8-4129-bd15-83a08cf28f66/kube-rbac-proxy/0.log" Nov 28 17:35:48 crc kubenswrapper[4954]: I1128 17:35:48.085365 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-xl989_3c9ab6ab-bcb8-4129-bd15-83a08cf28f66/manager/0.log" Nov 28 17:35:48 crc kubenswrapper[4954]: I1128 17:35:48.152937 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-c6pr6_ac397e56-94ea-4618-8516-dba8864212ef/kube-rbac-proxy/0.log" Nov 28 17:35:48 crc kubenswrapper[4954]: I1128 17:35:48.304716 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-c6pr6_ac397e56-94ea-4618-8516-dba8864212ef/manager/0.log" Nov 28 17:35:48 crc kubenswrapper[4954]: I1128 17:35:48.348794 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-8jp7l_65c96ed2-5a4a-4f4e-898d-3f3705d42250/kube-rbac-proxy/0.log" Nov 28 17:35:48 crc kubenswrapper[4954]: I1128 17:35:48.377459 4954 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-8jp7l_65c96ed2-5a4a-4f4e-898d-3f3705d42250/manager/0.log" Nov 28 17:35:48 crc kubenswrapper[4954]: I1128 17:35:48.575274 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-546d4bdf48-nkpj2_801feb18-7515-4b28-a364-23c2ad1d29a2/kube-rbac-proxy/0.log" Nov 28 17:35:48 crc kubenswrapper[4954]: I1128 17:35:48.621909 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-546d4bdf48-nkpj2_801feb18-7515-4b28-a364-23c2ad1d29a2/manager/0.log" Nov 28 17:35:48 crc kubenswrapper[4954]: I1128 17:35:48.771576 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6546668bfd-svnnf_1e466f0e-3bcc-4812-9939-90e852e54c6e/kube-rbac-proxy/0.log" Nov 28 17:35:48 crc kubenswrapper[4954]: I1128 17:35:48.790630 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6546668bfd-svnnf_1e466f0e-3bcc-4812-9939-90e852e54c6e/manager/0.log" Nov 28 17:35:48 crc kubenswrapper[4954]: I1128 17:35:48.851212 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-9smv8_9689608c-a2b5-49cd-a758-14d120acd0f4/kube-rbac-proxy/0.log" Nov 28 17:35:49 crc kubenswrapper[4954]: I1128 17:35:49.018192 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-stknf_1bf94f9e-ea95-4f86-9cc6-3bc27093b601/kube-rbac-proxy/0.log" Nov 28 17:35:49 crc kubenswrapper[4954]: I1128 17:35:49.036403 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-9smv8_9689608c-a2b5-49cd-a758-14d120acd0f4/manager/0.log" Nov 28 17:35:49 crc kubenswrapper[4954]: I1128 17:35:49.096888 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-stknf_1bf94f9e-ea95-4f86-9cc6-3bc27093b601/manager/0.log" Nov 28 17:35:49 crc kubenswrapper[4954]: I1128 17:35:49.223115 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-mbhfd_b6a73982-7f81-4947-938d-331487f421e0/kube-rbac-proxy/0.log" Nov 28 17:35:49 crc kubenswrapper[4954]: I1128 17:35:49.304146 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-mbhfd_b6a73982-7f81-4947-938d-331487f421e0/manager/0.log" Nov 28 17:35:49 crc kubenswrapper[4954]: I1128 17:35:49.432103 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-jrqz6_1a866879-c10a-477b-8e98-322e057ea708/kube-rbac-proxy/0.log" Nov 28 17:35:49 crc kubenswrapper[4954]: I1128 17:35:49.490269 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-jrqz6_1a866879-c10a-477b-8e98-322e057ea708/manager/0.log" Nov 28 17:35:49 crc kubenswrapper[4954]: I1128 17:35:49.597604 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-64bc77cfd4r279n_b2643938-ed66-4bf8-b080-24b3f8149d15/kube-rbac-proxy/0.log" Nov 28 17:35:49 crc kubenswrapper[4954]: I1128 
17:35:49.635602 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-64bc77cfd4r279n_b2643938-ed66-4bf8-b080-24b3f8149d15/manager/0.log" Nov 28 17:35:50 crc kubenswrapper[4954]: I1128 17:35:50.038519 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-xw2zf_01a1f124-447b-410d-9a6b-ebff06524b31/registry-server/0.log" Nov 28 17:35:50 crc kubenswrapper[4954]: I1128 17:35:50.060758 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-6fcddf5ccf-nk2nv_472bd715-386a-403e-9885-56d830589114/operator/0.log" Nov 28 17:35:50 crc kubenswrapper[4954]: I1128 17:35:50.261001 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-9vjc5_ba448a3d-8e7a-4d75-8853-8cf28954b48d/kube-rbac-proxy/0.log" Nov 28 17:35:50 crc kubenswrapper[4954]: I1128 17:35:50.381635 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-9vjc5_ba448a3d-8e7a-4d75-8853-8cf28954b48d/manager/0.log" Nov 28 17:35:50 crc kubenswrapper[4954]: I1128 17:35:50.389481 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-8wvs2_2023394a-92c9-4f1e-8823-f0e33c9381b3/kube-rbac-proxy/0.log" Nov 28 17:35:50 crc kubenswrapper[4954]: I1128 17:35:50.508331 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-78d5d44766-fx2tm_64e36e10-c08f-481d-9fc1-c08d7a54d72d/manager/0.log" Nov 28 17:35:50 crc kubenswrapper[4954]: I1128 17:35:50.523946 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-8wvs2_2023394a-92c9-4f1e-8823-f0e33c9381b3/manager/0.log" Nov 28 17:35:50 crc kubenswrapper[4954]: I1128 17:35:50.617838 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-784hq_73b9fb56-c718-41db-a043-82defc274674/operator/0.log" Nov 28 17:35:50 crc kubenswrapper[4954]: I1128 17:35:50.719462 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-wcz2l_a2541e1b-97ee-4c01-999e-1f62adba25b6/kube-rbac-proxy/0.log" Nov 28 17:35:50 crc kubenswrapper[4954]: I1128 17:35:50.723483 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-wcz2l_a2541e1b-97ee-4c01-999e-1f62adba25b6/manager/0.log" Nov 28 17:35:50 crc kubenswrapper[4954]: I1128 17:35:50.832906 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-vgcrb_92c27f55-05f6-4359-8ec8-9fa067172b51/kube-rbac-proxy/0.log" Nov 28 17:35:50 crc kubenswrapper[4954]: I1128 17:35:50.916361 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-7fjtc_d036c938-f2af-483f-b0a7-2a00101a5e7f/kube-rbac-proxy/0.log" Nov 28 17:35:50 crc kubenswrapper[4954]: I1128 17:35:50.928271 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-vgcrb_92c27f55-05f6-4359-8ec8-9fa067172b51/manager/0.log" Nov 28 17:35:51 crc kubenswrapper[4954]: 
I1128 17:35:51.021108 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-7fjtc_d036c938-f2af-483f-b0a7-2a00101a5e7f/manager/0.log" Nov 28 17:35:51 crc kubenswrapper[4954]: I1128 17:35:51.113502 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-769dc69bc-62lbb_ac579e75-f040-4640-ac48-7760cbce72ed/kube-rbac-proxy/0.log" Nov 28 17:35:51 crc kubenswrapper[4954]: I1128 17:35:51.114953 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-769dc69bc-62lbb_ac579e75-f040-4640-ac48-7760cbce72ed/manager/0.log" Nov 28 17:36:02 crc kubenswrapper[4954]: I1128 17:36:02.480712 4954 patch_prober.go:28] interesting pod/machine-config-daemon-jprxj container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:36:02 crc kubenswrapper[4954]: I1128 17:36:02.481351 4954 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:36:02 crc kubenswrapper[4954]: I1128 17:36:02.481408 4954 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" Nov 28 17:36:02 crc kubenswrapper[4954]: I1128 17:36:02.482118 4954 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416"} pod="openshift-machine-config-operator/machine-config-daemon-jprxj" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:36:02 crc kubenswrapper[4954]: I1128 17:36:02.482170 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerName="machine-config-daemon" containerID="cri-o://059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" gracePeriod=600 Nov 28 17:36:02 crc kubenswrapper[4954]: E1128 17:36:02.605780 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:36:03 crc kubenswrapper[4954]: I1128 17:36:03.332220 4954 generic.go:334] "Generic (PLEG): container finished" podID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" exitCode=0 Nov 28 17:36:03 crc kubenswrapper[4954]: I1128 17:36:03.332273 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" 
event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerDied","Data":"059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416"} Nov 28 17:36:03 crc kubenswrapper[4954]: I1128 17:36:03.332314 4954 scope.go:117] "RemoveContainer" containerID="9d38eb7f33f90e48d53b26448b4f83258a9ea24c8166e6f7984877f8f9b54aff" Nov 28 17:36:03 crc kubenswrapper[4954]: I1128 17:36:03.332959 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:36:03 crc kubenswrapper[4954]: E1128 17:36:03.333414 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:36:09 crc kubenswrapper[4954]: I1128 17:36:09.332812 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-c9ck6_8f76f36e-152f-4386-bb8b-79a98d8bbfa6/control-plane-machine-set-operator/0.log" Nov 28 17:36:09 crc kubenswrapper[4954]: I1128 17:36:09.488727 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-sr684_1576e0fb-2ebb-475e-abbc-6d4da8aca68c/kube-rbac-proxy/0.log" Nov 28 17:36:09 crc kubenswrapper[4954]: I1128 17:36:09.497266 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-sr684_1576e0fb-2ebb-475e-abbc-6d4da8aca68c/machine-api-operator/0.log" Nov 28 17:36:13 crc kubenswrapper[4954]: I1128 17:36:13.856488 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:36:13 crc kubenswrapper[4954]: E1128 17:36:13.857126 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:36:21 crc kubenswrapper[4954]: I1128 17:36:21.580633 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-rzs6f_b480d999-11d1-495e-8220-7c149204d009/cert-manager-controller/0.log" Nov 28 17:36:21 crc kubenswrapper[4954]: I1128 17:36:21.698657 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-96289_8c016281-4e3e-4410-81e7-ede4c7f72535/cert-manager-cainjector/0.log" Nov 28 17:36:21 crc kubenswrapper[4954]: I1128 17:36:21.718364 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-mlb76_2e56d709-e547-46e4-9bb5-f8c92f1742ce/cert-manager-webhook/0.log" Nov 28 17:36:26 crc kubenswrapper[4954]: I1128 17:36:26.856076 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:36:26 crc kubenswrapper[4954]: E1128 17:36:26.856739 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:36:33 crc kubenswrapper[4954]: I1128 17:36:33.769740 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-qx8zp_cad88f9f-405b-4de7-afab-7df439e60137/nmstate-console-plugin/0.log" Nov 28 17:36:33 crc kubenswrapper[4954]: I1128 17:36:33.939365 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-vvd9d_4506960b-5eca-435d-acc7-4c306a47a327/nmstate-handler/0.log" Nov 28 17:36:33 crc kubenswrapper[4954]: I1128 17:36:33.955937 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-vshc9_129fb511-5b79-4405-a5f2-c5b6028f4004/kube-rbac-proxy/0.log" Nov 28 17:36:33 crc kubenswrapper[4954]: I1128 17:36:33.973406 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-vshc9_129fb511-5b79-4405-a5f2-c5b6028f4004/nmstate-metrics/0.log" Nov 28 17:36:34 crc kubenswrapper[4954]: I1128 17:36:34.131383 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-pkc84_28db0f38-55d4-45de-b319-d85de1288c27/nmstate-operator/0.log" Nov 28 17:36:34 crc kubenswrapper[4954]: I1128 17:36:34.176878 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-b65g2_0f342ee6-8cab-41ec-b04e-61c4e241ef1a/nmstate-webhook/0.log" Nov 28 17:36:41 crc kubenswrapper[4954]: I1128 17:36:41.856149 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:36:41 crc kubenswrapper[4954]: E1128 17:36:41.857302 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:36:43 crc kubenswrapper[4954]: I1128 17:36:43.865960 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mxvfd"] Nov 28 17:36:43 crc kubenswrapper[4954]: I1128 17:36:43.867597 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:43 crc kubenswrapper[4954]: I1128 17:36:43.879128 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mxvfd"] Nov 28 17:36:43 crc kubenswrapper[4954]: I1128 17:36:43.941514 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkxmg\" (UniqueName: \"kubernetes.io/projected/84115938-bf42-4f0c-a905-38d853fd735b-kube-api-access-lkxmg\") pod \"redhat-operators-mxvfd\" (UID: \"84115938-bf42-4f0c-a905-38d853fd735b\") " pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:43 crc kubenswrapper[4954]: I1128 17:36:43.941655 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84115938-bf42-4f0c-a905-38d853fd735b-utilities\") pod \"redhat-operators-mxvfd\" (UID: \"84115938-bf42-4f0c-a905-38d853fd735b\") " pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:43 crc kubenswrapper[4954]: I1128 17:36:43.941698 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84115938-bf42-4f0c-a905-38d853fd735b-catalog-content\") pod \"redhat-operators-mxvfd\" (UID: \"84115938-bf42-4f0c-a905-38d853fd735b\") " pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:44 crc kubenswrapper[4954]: I1128 17:36:44.042973 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkxmg\" (UniqueName: \"kubernetes.io/projected/84115938-bf42-4f0c-a905-38d853fd735b-kube-api-access-lkxmg\") pod \"redhat-operators-mxvfd\" (UID: \"84115938-bf42-4f0c-a905-38d853fd735b\") " pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:44 crc kubenswrapper[4954]: I1128 17:36:44.043066 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84115938-bf42-4f0c-a905-38d853fd735b-utilities\") pod \"redhat-operators-mxvfd\" (UID: \"84115938-bf42-4f0c-a905-38d853fd735b\") " pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:44 crc kubenswrapper[4954]: I1128 17:36:44.043106 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84115938-bf42-4f0c-a905-38d853fd735b-catalog-content\") pod \"redhat-operators-mxvfd\" (UID: \"84115938-bf42-4f0c-a905-38d853fd735b\") " pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:44 crc kubenswrapper[4954]: I1128 17:36:44.043674 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84115938-bf42-4f0c-a905-38d853fd735b-catalog-content\") pod \"redhat-operators-mxvfd\" (UID: \"84115938-bf42-4f0c-a905-38d853fd735b\") " pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:44 crc kubenswrapper[4954]: I1128 17:36:44.043802 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84115938-bf42-4f0c-a905-38d853fd735b-utilities\") pod \"redhat-operators-mxvfd\" (UID: \"84115938-bf42-4f0c-a905-38d853fd735b\") " pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:44 crc kubenswrapper[4954]: I1128 17:36:44.066569 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-lkxmg\" (UniqueName: \"kubernetes.io/projected/84115938-bf42-4f0c-a905-38d853fd735b-kube-api-access-lkxmg\") pod \"redhat-operators-mxvfd\" (UID: \"84115938-bf42-4f0c-a905-38d853fd735b\") " pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:44 crc kubenswrapper[4954]: I1128 17:36:44.186110 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:44 crc kubenswrapper[4954]: I1128 17:36:44.629216 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mxvfd"] Nov 28 17:36:45 crc kubenswrapper[4954]: I1128 17:36:45.616191 4954 generic.go:334] "Generic (PLEG): container finished" podID="84115938-bf42-4f0c-a905-38d853fd735b" containerID="683bb0a88059eb27073c8a75627f61457b444dc472135a6534c3209a867d2eb5" exitCode=0 Nov 28 17:36:45 crc kubenswrapper[4954]: I1128 17:36:45.616280 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mxvfd" event={"ID":"84115938-bf42-4f0c-a905-38d853fd735b","Type":"ContainerDied","Data":"683bb0a88059eb27073c8a75627f61457b444dc472135a6534c3209a867d2eb5"} Nov 28 17:36:45 crc kubenswrapper[4954]: I1128 17:36:45.616605 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mxvfd" event={"ID":"84115938-bf42-4f0c-a905-38d853fd735b","Type":"ContainerStarted","Data":"45e43f73ead6cf8aee84c3b84c2c33be80f8a52ab6f16925d43627d85ddd0f6e"} Nov 28 17:36:46 crc kubenswrapper[4954]: I1128 17:36:46.064259 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zp26c"] Nov 28 17:36:46 crc kubenswrapper[4954]: I1128 17:36:46.065750 4954 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zp26c" Nov 28 17:36:46 crc kubenswrapper[4954]: I1128 17:36:46.132364 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zp26c"] Nov 28 17:36:46 crc kubenswrapper[4954]: I1128 17:36:46.184135 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dzch\" (UniqueName: \"kubernetes.io/projected/5640eed0-7f64-43b3-98cc-7d54ceb70ab4-kube-api-access-2dzch\") pod \"community-operators-zp26c\" (UID: \"5640eed0-7f64-43b3-98cc-7d54ceb70ab4\") " pod="openshift-marketplace/community-operators-zp26c" Nov 28 17:36:46 crc kubenswrapper[4954]: I1128 17:36:46.184191 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5640eed0-7f64-43b3-98cc-7d54ceb70ab4-catalog-content\") pod \"community-operators-zp26c\" (UID: \"5640eed0-7f64-43b3-98cc-7d54ceb70ab4\") " pod="openshift-marketplace/community-operators-zp26c" Nov 28 17:36:46 crc kubenswrapper[4954]: I1128 17:36:46.184222 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5640eed0-7f64-43b3-98cc-7d54ceb70ab4-utilities\") pod \"community-operators-zp26c\" (UID: \"5640eed0-7f64-43b3-98cc-7d54ceb70ab4\") " pod="openshift-marketplace/community-operators-zp26c" Nov 28 17:36:46 crc kubenswrapper[4954]: I1128 17:36:46.286321 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dzch\" (UniqueName: \"kubernetes.io/projected/5640eed0-7f64-43b3-98cc-7d54ceb70ab4-kube-api-access-2dzch\") pod \"community-operators-zp26c\" (UID: \"5640eed0-7f64-43b3-98cc-7d54ceb70ab4\") " pod="openshift-marketplace/community-operators-zp26c" Nov 28 17:36:46 crc kubenswrapper[4954]: I1128 17:36:46.286374 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5640eed0-7f64-43b3-98cc-7d54ceb70ab4-catalog-content\") pod \"community-operators-zp26c\" (UID: \"5640eed0-7f64-43b3-98cc-7d54ceb70ab4\") " pod="openshift-marketplace/community-operators-zp26c" Nov 28 17:36:46 crc kubenswrapper[4954]: I1128 17:36:46.286395 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5640eed0-7f64-43b3-98cc-7d54ceb70ab4-utilities\") pod \"community-operators-zp26c\" (UID: \"5640eed0-7f64-43b3-98cc-7d54ceb70ab4\") " pod="openshift-marketplace/community-operators-zp26c" Nov 28 17:36:46 crc kubenswrapper[4954]: I1128 17:36:46.286992 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5640eed0-7f64-43b3-98cc-7d54ceb70ab4-utilities\") pod \"community-operators-zp26c\" (UID: \"5640eed0-7f64-43b3-98cc-7d54ceb70ab4\") " pod="openshift-marketplace/community-operators-zp26c" Nov 28 17:36:46 crc kubenswrapper[4954]: I1128 17:36:46.287058 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5640eed0-7f64-43b3-98cc-7d54ceb70ab4-catalog-content\") pod \"community-operators-zp26c\" (UID: \"5640eed0-7f64-43b3-98cc-7d54ceb70ab4\") " pod="openshift-marketplace/community-operators-zp26c" Nov 28 17:36:46 crc kubenswrapper[4954]: I1128 17:36:46.305960 4954 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2dzch\" (UniqueName: \"kubernetes.io/projected/5640eed0-7f64-43b3-98cc-7d54ceb70ab4-kube-api-access-2dzch\") pod \"community-operators-zp26c\" (UID: \"5640eed0-7f64-43b3-98cc-7d54ceb70ab4\") " pod="openshift-marketplace/community-operators-zp26c" Nov 28 17:36:46 crc kubenswrapper[4954]: I1128 17:36:46.399041 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zp26c" Nov 28 17:36:46 crc kubenswrapper[4954]: I1128 17:36:46.956832 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zp26c"] Nov 28 17:36:46 crc kubenswrapper[4954]: W1128 17:36:46.958746 4954 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5640eed0_7f64_43b3_98cc_7d54ceb70ab4.slice/crio-bff12d331c5183fdb21ac776f78ef6fa04015a09179e0d33f0297948020f06c2 WatchSource:0}: Error finding container bff12d331c5183fdb21ac776f78ef6fa04015a09179e0d33f0297948020f06c2: Status 404 returned error can't find the container with id bff12d331c5183fdb21ac776f78ef6fa04015a09179e0d33f0297948020f06c2 Nov 28 17:36:47 crc kubenswrapper[4954]: I1128 17:36:47.633655 4954 generic.go:334] "Generic (PLEG): container finished" podID="84115938-bf42-4f0c-a905-38d853fd735b" containerID="d6f09e594f7ce12aabd70b2ab9b543186c9194919f05ef7d05e762a644377f07" exitCode=0 Nov 28 17:36:47 crc kubenswrapper[4954]: I1128 17:36:47.633822 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mxvfd" event={"ID":"84115938-bf42-4f0c-a905-38d853fd735b","Type":"ContainerDied","Data":"d6f09e594f7ce12aabd70b2ab9b543186c9194919f05ef7d05e762a644377f07"} Nov 28 17:36:47 crc kubenswrapper[4954]: I1128 17:36:47.636246 4954 generic.go:334] "Generic (PLEG): container finished" podID="5640eed0-7f64-43b3-98cc-7d54ceb70ab4" containerID="8e8209fade6f53729ee24bbea589c3a2dbb7a34eb10a3012e48e781da7c4bcc2" exitCode=0 Nov 28 17:36:47 crc kubenswrapper[4954]: I1128 17:36:47.636281 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zp26c" event={"ID":"5640eed0-7f64-43b3-98cc-7d54ceb70ab4","Type":"ContainerDied","Data":"8e8209fade6f53729ee24bbea589c3a2dbb7a34eb10a3012e48e781da7c4bcc2"} Nov 28 17:36:47 crc kubenswrapper[4954]: I1128 17:36:47.636305 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zp26c" event={"ID":"5640eed0-7f64-43b3-98cc-7d54ceb70ab4","Type":"ContainerStarted","Data":"bff12d331c5183fdb21ac776f78ef6fa04015a09179e0d33f0297948020f06c2"} Nov 28 17:36:47 crc kubenswrapper[4954]: I1128 17:36:47.966623 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-pl6wr_cd7512cb-cf15-46bc-a4ce-a530e16c305e/kube-rbac-proxy/0.log" Nov 28 17:36:48 crc kubenswrapper[4954]: I1128 17:36:48.247264 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/cp-frr-files/0.log" Nov 28 17:36:48 crc kubenswrapper[4954]: I1128 17:36:48.348611 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-pl6wr_cd7512cb-cf15-46bc-a4ce-a530e16c305e/controller/0.log" Nov 28 17:36:48 crc kubenswrapper[4954]: I1128 17:36:48.374059 4954 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/cp-frr-files/0.log" Nov 28 17:36:48 crc kubenswrapper[4954]: I1128 17:36:48.380080 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/cp-reloader/0.log" Nov 28 17:36:48 crc kubenswrapper[4954]: I1128 17:36:48.448387 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/cp-metrics/0.log" Nov 28 17:36:48 crc kubenswrapper[4954]: I1128 17:36:48.596181 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/cp-reloader/0.log" Nov 28 17:36:48 crc kubenswrapper[4954]: I1128 17:36:48.653492 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mxvfd" event={"ID":"84115938-bf42-4f0c-a905-38d853fd735b","Type":"ContainerStarted","Data":"9a2dae827e943298c56f957c493a440baa0f5de3d4b7fb6bb7bbcd5fe3bb2d84"} Nov 28 17:36:48 crc kubenswrapper[4954]: I1128 17:36:48.675905 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mxvfd" podStartSLOduration=3.122667184 podStartE2EDuration="5.67588443s" podCreationTimestamp="2025-11-28 17:36:43 +0000 UTC" firstStartedPulling="2025-11-28 17:36:45.617410476 +0000 UTC m=+5159.009079017" lastFinishedPulling="2025-11-28 17:36:48.170627722 +0000 UTC m=+5161.562296263" observedRunningTime="2025-11-28 17:36:48.673448973 +0000 UTC m=+5162.065117504" watchObservedRunningTime="2025-11-28 17:36:48.67588443 +0000 UTC m=+5162.067552971" Nov 28 17:36:48 crc kubenswrapper[4954]: I1128 17:36:48.754729 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/cp-frr-files/0.log" Nov 28 17:36:48 crc kubenswrapper[4954]: I1128 17:36:48.780284 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/cp-reloader/0.log" Nov 28 17:36:48 crc kubenswrapper[4954]: I1128 17:36:48.790936 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/cp-metrics/0.log" Nov 28 17:36:48 crc kubenswrapper[4954]: I1128 17:36:48.840972 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/cp-metrics/0.log" Nov 28 17:36:49 crc kubenswrapper[4954]: I1128 17:36:49.003889 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/cp-reloader/0.log" Nov 28 17:36:49 crc kubenswrapper[4954]: I1128 17:36:49.014187 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/cp-frr-files/0.log" Nov 28 17:36:49 crc kubenswrapper[4954]: I1128 17:36:49.028958 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/cp-metrics/0.log" Nov 28 17:36:49 crc kubenswrapper[4954]: I1128 17:36:49.085480 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/controller/0.log" Nov 28 17:36:49 crc kubenswrapper[4954]: I1128 17:36:49.235272 4954 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/frr-metrics/0.log" Nov 28 17:36:49 crc kubenswrapper[4954]: I1128 17:36:49.236136 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/kube-rbac-proxy/0.log" Nov 28 17:36:49 crc kubenswrapper[4954]: I1128 17:36:49.311677 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/kube-rbac-proxy-frr/0.log" Nov 28 17:36:49 crc kubenswrapper[4954]: I1128 17:36:49.472929 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/reloader/0.log" Nov 28 17:36:49 crc kubenswrapper[4954]: I1128 17:36:49.511262 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-7q559_b5e787d2-d6a3-4e50-9a29-077809abbe6a/frr-k8s-webhook-server/0.log" Nov 28 17:36:49 crc kubenswrapper[4954]: I1128 17:36:49.804986 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-67f7cc84c-gqj2n_f60ce9cc-8da2-43ee-a9b9-ea24ed39a910/manager/0.log" Nov 28 17:36:49 crc kubenswrapper[4954]: I1128 17:36:49.934040 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-54cdc5b6b6-5zldd_d80c8971-4919-44a7-9241-ad0095dbb820/webhook-server/0.log" Nov 28 17:36:50 crc kubenswrapper[4954]: I1128 17:36:50.017999 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-nb867_f616c2cc-42cf-47c5-88f7-608060ddcd9e/kube-rbac-proxy/0.log" Nov 28 17:36:50 crc kubenswrapper[4954]: I1128 17:36:50.712629 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-vxw8v_bb5e50bb-a885-4ae5-a7a8-d9214274540e/frr/0.log" Nov 28 17:36:50 crc kubenswrapper[4954]: I1128 17:36:50.729100 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-nb867_f616c2cc-42cf-47c5-88f7-608060ddcd9e/speaker/0.log" Nov 28 17:36:53 crc kubenswrapper[4954]: I1128 17:36:53.857864 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:36:53 crc kubenswrapper[4954]: E1128 17:36:53.858308 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:36:54 crc kubenswrapper[4954]: I1128 17:36:54.186251 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:54 crc kubenswrapper[4954]: I1128 17:36:54.186292 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:54 crc kubenswrapper[4954]: I1128 17:36:54.234829 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:54 crc kubenswrapper[4954]: I1128 17:36:54.704010 4954 generic.go:334] "Generic (PLEG): container finished" podID="5640eed0-7f64-43b3-98cc-7d54ceb70ab4" 
containerID="c7917f5a2b669c6324f60c771fe5cbb1a5f4d385211375e57087a76c327ec764" exitCode=0 Nov 28 17:36:54 crc kubenswrapper[4954]: I1128 17:36:54.704123 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zp26c" event={"ID":"5640eed0-7f64-43b3-98cc-7d54ceb70ab4","Type":"ContainerDied","Data":"c7917f5a2b669c6324f60c771fe5cbb1a5f4d385211375e57087a76c327ec764"} Nov 28 17:36:54 crc kubenswrapper[4954]: I1128 17:36:54.764339 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:55 crc kubenswrapper[4954]: I1128 17:36:55.714095 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zp26c" event={"ID":"5640eed0-7f64-43b3-98cc-7d54ceb70ab4","Type":"ContainerStarted","Data":"14aeeaec3c311feaa6f5ea112542cbb50d559c4cb368df8f6ddb1b4e724ff8a6"} Nov 28 17:36:55 crc kubenswrapper[4954]: I1128 17:36:55.735582 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zp26c" podStartSLOduration=2.198575269 podStartE2EDuration="9.735554159s" podCreationTimestamp="2025-11-28 17:36:46 +0000 UTC" firstStartedPulling="2025-11-28 17:36:47.637789 +0000 UTC m=+5161.029457541" lastFinishedPulling="2025-11-28 17:36:55.1747679 +0000 UTC m=+5168.566436431" observedRunningTime="2025-11-28 17:36:55.730201919 +0000 UTC m=+5169.121870460" watchObservedRunningTime="2025-11-28 17:36:55.735554159 +0000 UTC m=+5169.127222720" Nov 28 17:36:56 crc kubenswrapper[4954]: I1128 17:36:56.399619 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zp26c" Nov 28 17:36:56 crc kubenswrapper[4954]: I1128 17:36:56.399692 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zp26c" Nov 28 17:36:57 crc kubenswrapper[4954]: I1128 17:36:57.438051 4954 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-zp26c" podUID="5640eed0-7f64-43b3-98cc-7d54ceb70ab4" containerName="registry-server" probeResult="failure" output=< Nov 28 17:36:57 crc kubenswrapper[4954]: timeout: failed to connect service ":50051" within 1s Nov 28 17:36:57 crc kubenswrapper[4954]: > Nov 28 17:36:58 crc kubenswrapper[4954]: I1128 17:36:58.055154 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mxvfd"] Nov 28 17:36:58 crc kubenswrapper[4954]: I1128 17:36:58.055691 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mxvfd" podUID="84115938-bf42-4f0c-a905-38d853fd735b" containerName="registry-server" containerID="cri-o://9a2dae827e943298c56f957c493a440baa0f5de3d4b7fb6bb7bbcd5fe3bb2d84" gracePeriod=2 Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.552928 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.563650 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkxmg\" (UniqueName: \"kubernetes.io/projected/84115938-bf42-4f0c-a905-38d853fd735b-kube-api-access-lkxmg\") pod \"84115938-bf42-4f0c-a905-38d853fd735b\" (UID: \"84115938-bf42-4f0c-a905-38d853fd735b\") " Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.563718 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84115938-bf42-4f0c-a905-38d853fd735b-catalog-content\") pod \"84115938-bf42-4f0c-a905-38d853fd735b\" (UID: \"84115938-bf42-4f0c-a905-38d853fd735b\") " Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.563875 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84115938-bf42-4f0c-a905-38d853fd735b-utilities\") pod \"84115938-bf42-4f0c-a905-38d853fd735b\" (UID: \"84115938-bf42-4f0c-a905-38d853fd735b\") " Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.565015 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84115938-bf42-4f0c-a905-38d853fd735b-utilities" (OuterVolumeSpecName: "utilities") pod "84115938-bf42-4f0c-a905-38d853fd735b" (UID: "84115938-bf42-4f0c-a905-38d853fd735b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.572048 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84115938-bf42-4f0c-a905-38d853fd735b-kube-api-access-lkxmg" (OuterVolumeSpecName: "kube-api-access-lkxmg") pod "84115938-bf42-4f0c-a905-38d853fd735b" (UID: "84115938-bf42-4f0c-a905-38d853fd735b"). InnerVolumeSpecName "kube-api-access-lkxmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.665853 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84115938-bf42-4f0c-a905-38d853fd735b-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.665885 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkxmg\" (UniqueName: \"kubernetes.io/projected/84115938-bf42-4f0c-a905-38d853fd735b-kube-api-access-lkxmg\") on node \"crc\" DevicePath \"\"" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.670687 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84115938-bf42-4f0c-a905-38d853fd735b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "84115938-bf42-4f0c-a905-38d853fd735b" (UID: "84115938-bf42-4f0c-a905-38d853fd735b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.744305 4954 generic.go:334] "Generic (PLEG): container finished" podID="84115938-bf42-4f0c-a905-38d853fd735b" containerID="9a2dae827e943298c56f957c493a440baa0f5de3d4b7fb6bb7bbcd5fe3bb2d84" exitCode=0 Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.744364 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mxvfd" event={"ID":"84115938-bf42-4f0c-a905-38d853fd735b","Type":"ContainerDied","Data":"9a2dae827e943298c56f957c493a440baa0f5de3d4b7fb6bb7bbcd5fe3bb2d84"} Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.744412 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mxvfd" event={"ID":"84115938-bf42-4f0c-a905-38d853fd735b","Type":"ContainerDied","Data":"45e43f73ead6cf8aee84c3b84c2c33be80f8a52ab6f16925d43627d85ddd0f6e"} Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.744437 4954 scope.go:117] "RemoveContainer" containerID="9a2dae827e943298c56f957c493a440baa0f5de3d4b7fb6bb7bbcd5fe3bb2d84" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.744381 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mxvfd" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.767505 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84115938-bf42-4f0c-a905-38d853fd735b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.769846 4954 scope.go:117] "RemoveContainer" containerID="d6f09e594f7ce12aabd70b2ab9b543186c9194919f05ef7d05e762a644377f07" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.789810 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mxvfd"] Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.793070 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mxvfd"] Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.806852 4954 scope.go:117] "RemoveContainer" containerID="683bb0a88059eb27073c8a75627f61457b444dc472135a6534c3209a867d2eb5" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.826082 4954 scope.go:117] "RemoveContainer" containerID="9a2dae827e943298c56f957c493a440baa0f5de3d4b7fb6bb7bbcd5fe3bb2d84" Nov 28 17:36:59 crc kubenswrapper[4954]: E1128 17:36:59.826709 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a2dae827e943298c56f957c493a440baa0f5de3d4b7fb6bb7bbcd5fe3bb2d84\": container with ID starting with 9a2dae827e943298c56f957c493a440baa0f5de3d4b7fb6bb7bbcd5fe3bb2d84 not found: ID does not exist" containerID="9a2dae827e943298c56f957c493a440baa0f5de3d4b7fb6bb7bbcd5fe3bb2d84" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.826753 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a2dae827e943298c56f957c493a440baa0f5de3d4b7fb6bb7bbcd5fe3bb2d84"} err="failed to get container status \"9a2dae827e943298c56f957c493a440baa0f5de3d4b7fb6bb7bbcd5fe3bb2d84\": rpc error: code = NotFound desc = could not find container \"9a2dae827e943298c56f957c493a440baa0f5de3d4b7fb6bb7bbcd5fe3bb2d84\": container with ID starting with 9a2dae827e943298c56f957c493a440baa0f5de3d4b7fb6bb7bbcd5fe3bb2d84 not found: ID does not exist" Nov 28 17:36:59 crc 
kubenswrapper[4954]: I1128 17:36:59.826780 4954 scope.go:117] "RemoveContainer" containerID="d6f09e594f7ce12aabd70b2ab9b543186c9194919f05ef7d05e762a644377f07" Nov 28 17:36:59 crc kubenswrapper[4954]: E1128 17:36:59.827215 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6f09e594f7ce12aabd70b2ab9b543186c9194919f05ef7d05e762a644377f07\": container with ID starting with d6f09e594f7ce12aabd70b2ab9b543186c9194919f05ef7d05e762a644377f07 not found: ID does not exist" containerID="d6f09e594f7ce12aabd70b2ab9b543186c9194919f05ef7d05e762a644377f07" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.827238 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6f09e594f7ce12aabd70b2ab9b543186c9194919f05ef7d05e762a644377f07"} err="failed to get container status \"d6f09e594f7ce12aabd70b2ab9b543186c9194919f05ef7d05e762a644377f07\": rpc error: code = NotFound desc = could not find container \"d6f09e594f7ce12aabd70b2ab9b543186c9194919f05ef7d05e762a644377f07\": container with ID starting with d6f09e594f7ce12aabd70b2ab9b543186c9194919f05ef7d05e762a644377f07 not found: ID does not exist" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.827259 4954 scope.go:117] "RemoveContainer" containerID="683bb0a88059eb27073c8a75627f61457b444dc472135a6534c3209a867d2eb5" Nov 28 17:36:59 crc kubenswrapper[4954]: E1128 17:36:59.827973 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"683bb0a88059eb27073c8a75627f61457b444dc472135a6534c3209a867d2eb5\": container with ID starting with 683bb0a88059eb27073c8a75627f61457b444dc472135a6534c3209a867d2eb5 not found: ID does not exist" containerID="683bb0a88059eb27073c8a75627f61457b444dc472135a6534c3209a867d2eb5" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.828003 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"683bb0a88059eb27073c8a75627f61457b444dc472135a6534c3209a867d2eb5"} err="failed to get container status \"683bb0a88059eb27073c8a75627f61457b444dc472135a6534c3209a867d2eb5\": rpc error: code = NotFound desc = could not find container \"683bb0a88059eb27073c8a75627f61457b444dc472135a6534c3209a867d2eb5\": container with ID starting with 683bb0a88059eb27073c8a75627f61457b444dc472135a6534c3209a867d2eb5 not found: ID does not exist" Nov 28 17:36:59 crc kubenswrapper[4954]: I1128 17:36:59.865277 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84115938-bf42-4f0c-a905-38d853fd735b" path="/var/lib/kubelet/pods/84115938-bf42-4f0c-a905-38d853fd735b/volumes" Nov 28 17:37:03 crc kubenswrapper[4954]: I1128 17:37:03.060935 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862_6b04abe7-9fca-439a-809b-e6c5c09aa88b/util/0.log" Nov 28 17:37:03 crc kubenswrapper[4954]: I1128 17:37:03.292560 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862_6b04abe7-9fca-439a-809b-e6c5c09aa88b/util/0.log" Nov 28 17:37:03 crc kubenswrapper[4954]: I1128 17:37:03.302435 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862_6b04abe7-9fca-439a-809b-e6c5c09aa88b/pull/0.log" Nov 28 17:37:03 crc kubenswrapper[4954]: I1128 17:37:03.355163 
4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862_6b04abe7-9fca-439a-809b-e6c5c09aa88b/pull/0.log" Nov 28 17:37:03 crc kubenswrapper[4954]: I1128 17:37:03.524576 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862_6b04abe7-9fca-439a-809b-e6c5c09aa88b/util/0.log" Nov 28 17:37:03 crc kubenswrapper[4954]: I1128 17:37:03.539449 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862_6b04abe7-9fca-439a-809b-e6c5c09aa88b/extract/0.log" Nov 28 17:37:03 crc kubenswrapper[4954]: I1128 17:37:03.546192 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931azm862_6b04abe7-9fca-439a-809b-e6c5c09aa88b/pull/0.log" Nov 28 17:37:03 crc kubenswrapper[4954]: I1128 17:37:03.731586 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5_180f36bc-8328-43c7-92fc-f3bf6deee7ef/util/0.log" Nov 28 17:37:03 crc kubenswrapper[4954]: I1128 17:37:03.852082 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5_180f36bc-8328-43c7-92fc-f3bf6deee7ef/util/0.log" Nov 28 17:37:03 crc kubenswrapper[4954]: I1128 17:37:03.909519 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5_180f36bc-8328-43c7-92fc-f3bf6deee7ef/pull/0.log" Nov 28 17:37:03 crc kubenswrapper[4954]: I1128 17:37:03.931861 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5_180f36bc-8328-43c7-92fc-f3bf6deee7ef/pull/0.log" Nov 28 17:37:04 crc kubenswrapper[4954]: I1128 17:37:04.114672 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5_180f36bc-8328-43c7-92fc-f3bf6deee7ef/util/0.log" Nov 28 17:37:04 crc kubenswrapper[4954]: I1128 17:37:04.115217 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5_180f36bc-8328-43c7-92fc-f3bf6deee7ef/pull/0.log" Nov 28 17:37:04 crc kubenswrapper[4954]: I1128 17:37:04.153366 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fgk9d5_180f36bc-8328-43c7-92fc-f3bf6deee7ef/extract/0.log" Nov 28 17:37:04 crc kubenswrapper[4954]: I1128 17:37:04.317982 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n_0a285e7e-472d-473d-909c-c1c2de773b12/util/0.log" Nov 28 17:37:04 crc kubenswrapper[4954]: I1128 17:37:04.460775 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n_0a285e7e-472d-473d-909c-c1c2de773b12/util/0.log" Nov 28 17:37:04 crc kubenswrapper[4954]: I1128 17:37:04.502799 4954 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n_0a285e7e-472d-473d-909c-c1c2de773b12/pull/0.log" Nov 28 17:37:04 crc kubenswrapper[4954]: I1128 17:37:04.505083 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n_0a285e7e-472d-473d-909c-c1c2de773b12/pull/0.log" Nov 28 17:37:04 crc kubenswrapper[4954]: I1128 17:37:04.683708 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n_0a285e7e-472d-473d-909c-c1c2de773b12/util/0.log" Nov 28 17:37:04 crc kubenswrapper[4954]: I1128 17:37:04.686234 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n_0a285e7e-472d-473d-909c-c1c2de773b12/pull/0.log" Nov 28 17:37:04 crc kubenswrapper[4954]: I1128 17:37:04.686442 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83dlp2n_0a285e7e-472d-473d-909c-c1c2de773b12/extract/0.log" Nov 28 17:37:04 crc kubenswrapper[4954]: I1128 17:37:04.848229 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j68wt_746b782a-972a-48ef-b07c-6dedfba8ea94/extract-utilities/0.log" Nov 28 17:37:05 crc kubenswrapper[4954]: I1128 17:37:05.028846 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j68wt_746b782a-972a-48ef-b07c-6dedfba8ea94/extract-utilities/0.log" Nov 28 17:37:05 crc kubenswrapper[4954]: I1128 17:37:05.032413 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j68wt_746b782a-972a-48ef-b07c-6dedfba8ea94/extract-content/0.log" Nov 28 17:37:05 crc kubenswrapper[4954]: I1128 17:37:05.052512 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j68wt_746b782a-972a-48ef-b07c-6dedfba8ea94/extract-content/0.log" Nov 28 17:37:05 crc kubenswrapper[4954]: I1128 17:37:05.229796 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j68wt_746b782a-972a-48ef-b07c-6dedfba8ea94/extract-utilities/0.log" Nov 28 17:37:05 crc kubenswrapper[4954]: I1128 17:37:05.275449 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j68wt_746b782a-972a-48ef-b07c-6dedfba8ea94/extract-content/0.log" Nov 28 17:37:05 crc kubenswrapper[4954]: I1128 17:37:05.418047 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wxnll_8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec/extract-utilities/0.log" Nov 28 17:37:05 crc kubenswrapper[4954]: I1128 17:37:05.425539 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j68wt_746b782a-972a-48ef-b07c-6dedfba8ea94/registry-server/0.log" Nov 28 17:37:05 crc kubenswrapper[4954]: I1128 17:37:05.627372 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wxnll_8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec/extract-utilities/0.log" Nov 28 17:37:05 crc kubenswrapper[4954]: I1128 17:37:05.632406 4954 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-wxnll_8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec/extract-content/0.log" Nov 28 17:37:05 crc kubenswrapper[4954]: I1128 17:37:05.638467 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wxnll_8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec/extract-content/0.log" Nov 28 17:37:05 crc kubenswrapper[4954]: I1128 17:37:05.798141 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wxnll_8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec/extract-content/0.log" Nov 28 17:37:05 crc kubenswrapper[4954]: I1128 17:37:05.805822 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wxnll_8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec/extract-utilities/0.log" Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.036040 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zp26c_5640eed0-7f64-43b3-98cc-7d54ceb70ab4/extract-utilities/0.log" Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.248412 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zp26c_5640eed0-7f64-43b3-98cc-7d54ceb70ab4/extract-content/0.log" Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.330352 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zp26c_5640eed0-7f64-43b3-98cc-7d54ceb70ab4/extract-utilities/0.log" Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.330378 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zp26c_5640eed0-7f64-43b3-98cc-7d54ceb70ab4/extract-content/0.log" Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.434860 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-wxnll_8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec/registry-server/0.log" Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.447692 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zp26c" Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.458578 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zp26c_5640eed0-7f64-43b3-98cc-7d54ceb70ab4/extract-utilities/0.log" Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.516350 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zp26c" Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.603382 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zp26c_5640eed0-7f64-43b3-98cc-7d54ceb70ab4/extract-content/0.log" Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.645000 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zp26c"] Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.711107 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wxnll"] Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.711333 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wxnll" podUID="8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec" containerName="registry-server" 
containerID="cri-o://6835f72240ac625d81eb99af7f9bf97ade1c2dc307129c9df59ef1efe5a36d55" gracePeriod=2 Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.757953 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-zp26c_5640eed0-7f64-43b3-98cc-7d54ceb70ab4/registry-server/0.log" Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.855820 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:37:06 crc kubenswrapper[4954]: E1128 17:37:06.856039 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.896887 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-77lzc_ea94ca81-4305-4775-bfc2-a7ce2a7f47b9/marketplace-operator/0.log" Nov 28 17:37:06 crc kubenswrapper[4954]: I1128 17:37:06.998625 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mls2m_14241127-315f-4637-82e7-6b2e98684240/extract-utilities/0.log" Nov 28 17:37:07 crc kubenswrapper[4954]: I1128 17:37:07.247188 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mls2m_14241127-315f-4637-82e7-6b2e98684240/extract-utilities/0.log" Nov 28 17:37:07 crc kubenswrapper[4954]: I1128 17:37:07.247193 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mls2m_14241127-315f-4637-82e7-6b2e98684240/extract-content/0.log" Nov 28 17:37:07 crc kubenswrapper[4954]: I1128 17:37:07.332905 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mls2m_14241127-315f-4637-82e7-6b2e98684240/extract-content/0.log" Nov 28 17:37:07 crc kubenswrapper[4954]: I1128 17:37:07.570942 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mls2m_14241127-315f-4637-82e7-6b2e98684240/extract-content/0.log" Nov 28 17:37:07 crc kubenswrapper[4954]: I1128 17:37:07.663084 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mls2m_14241127-315f-4637-82e7-6b2e98684240/extract-utilities/0.log" Nov 28 17:37:07 crc kubenswrapper[4954]: I1128 17:37:07.845861 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-mls2m_14241127-315f-4637-82e7-6b2e98684240/registry-server/0.log" Nov 28 17:37:07 crc kubenswrapper[4954]: I1128 17:37:07.983573 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-sjfwm_dfe853ea-55a1-4d06-a731-005c0bb33e7c/extract-utilities/0.log" Nov 28 17:37:08 crc kubenswrapper[4954]: I1128 17:37:08.029826 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-sjfwm_dfe853ea-55a1-4d06-a731-005c0bb33e7c/extract-content/0.log" Nov 28 17:37:08 crc kubenswrapper[4954]: I1128 17:37:08.029972 4954 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-sjfwm_dfe853ea-55a1-4d06-a731-005c0bb33e7c/extract-content/0.log" Nov 28 17:37:08 crc kubenswrapper[4954]: I1128 17:37:08.119289 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-sjfwm_dfe853ea-55a1-4d06-a731-005c0bb33e7c/extract-utilities/0.log" Nov 28 17:37:08 crc kubenswrapper[4954]: I1128 17:37:08.375646 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-sjfwm_dfe853ea-55a1-4d06-a731-005c0bb33e7c/extract-utilities/0.log" Nov 28 17:37:08 crc kubenswrapper[4954]: I1128 17:37:08.386267 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-sjfwm_dfe853ea-55a1-4d06-a731-005c0bb33e7c/extract-content/0.log" Nov 28 17:37:08 crc kubenswrapper[4954]: I1128 17:37:08.823141 4954 generic.go:334] "Generic (PLEG): container finished" podID="8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec" containerID="6835f72240ac625d81eb99af7f9bf97ade1c2dc307129c9df59ef1efe5a36d55" exitCode=0 Nov 28 17:37:08 crc kubenswrapper[4954]: I1128 17:37:08.823362 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxnll" event={"ID":"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec","Type":"ContainerDied","Data":"6835f72240ac625d81eb99af7f9bf97ade1c2dc307129c9df59ef1efe5a36d55"} Nov 28 17:37:08 crc kubenswrapper[4954]: I1128 17:37:08.868117 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-sjfwm_dfe853ea-55a1-4d06-a731-005c0bb33e7c/registry-server/0.log" Nov 28 17:37:08 crc kubenswrapper[4954]: I1128 17:37:08.977995 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wxnll" Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.121971 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pgbn\" (UniqueName: \"kubernetes.io/projected/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-kube-api-access-8pgbn\") pod \"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec\" (UID: \"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec\") " Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.122071 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-utilities\") pod \"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec\" (UID: \"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec\") " Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.122375 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-catalog-content\") pod \"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec\" (UID: \"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec\") " Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.122882 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-utilities" (OuterVolumeSpecName: "utilities") pod "8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec" (UID: "8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.127930 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-kube-api-access-8pgbn" (OuterVolumeSpecName: "kube-api-access-8pgbn") pod "8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec" (UID: "8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec"). InnerVolumeSpecName "kube-api-access-8pgbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.169571 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec" (UID: "8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.224582 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pgbn\" (UniqueName: \"kubernetes.io/projected/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-kube-api-access-8pgbn\") on node \"crc\" DevicePath \"\"" Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.224614 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.224623 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.833265 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxnll" event={"ID":"8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec","Type":"ContainerDied","Data":"66c0f089af2a208d2d8fe9a591e4e6d8406bbbdcb22c5078a4354968d7d4a191"} Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.833313 4954 scope.go:117] "RemoveContainer" containerID="6835f72240ac625d81eb99af7f9bf97ade1c2dc307129c9df59ef1efe5a36d55" Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.833433 4954 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wxnll" Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.858835 4954 scope.go:117] "RemoveContainer" containerID="22c04f03a906a58c724ce5e7fdd51294c602526dd4eccca7a89b58541155a247" Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.871613 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wxnll"] Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.881284 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wxnll"] Nov 28 17:37:09 crc kubenswrapper[4954]: I1128 17:37:09.884280 4954 scope.go:117] "RemoveContainer" containerID="f77874abe0e0d6912b9bfc67dce7510ecfadb53c35346dd4845d3402c48e4965" Nov 28 17:37:11 crc kubenswrapper[4954]: I1128 17:37:11.864668 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec" path="/var/lib/kubelet/pods/8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec/volumes" Nov 28 17:37:18 crc kubenswrapper[4954]: I1128 17:37:18.855834 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:37:18 crc kubenswrapper[4954]: E1128 17:37:18.857909 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:37:32 crc kubenswrapper[4954]: I1128 17:37:32.856152 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:37:32 crc kubenswrapper[4954]: E1128 17:37:32.856888 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:37:43 crc kubenswrapper[4954]: I1128 17:37:43.856515 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:37:43 crc kubenswrapper[4954]: E1128 17:37:43.857283 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:37:56 crc kubenswrapper[4954]: I1128 17:37:56.856505 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:37:56 crc kubenswrapper[4954]: E1128 17:37:56.857468 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:38:09 crc kubenswrapper[4954]: I1128 17:38:09.856162 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:38:09 crc kubenswrapper[4954]: E1128 17:38:09.857648 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:38:14 crc kubenswrapper[4954]: I1128 17:38:14.326739 4954 generic.go:334] "Generic (PLEG): container finished" podID="598386fe-f553-4dc1-b298-2b9e66befd85" containerID="0a6854db7b44f6392b9e70e197951101c1a1d1d3225ec33a32bdfc6968cd36bb" exitCode=0 Nov 28 17:38:14 crc kubenswrapper[4954]: I1128 17:38:14.326806 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-wpdqq/must-gather-vl2l8" event={"ID":"598386fe-f553-4dc1-b298-2b9e66befd85","Type":"ContainerDied","Data":"0a6854db7b44f6392b9e70e197951101c1a1d1d3225ec33a32bdfc6968cd36bb"} Nov 28 17:38:14 crc kubenswrapper[4954]: I1128 17:38:14.328013 4954 scope.go:117] "RemoveContainer" containerID="0a6854db7b44f6392b9e70e197951101c1a1d1d3225ec33a32bdfc6968cd36bb" Nov 28 17:38:14 crc kubenswrapper[4954]: I1128 17:38:14.519612 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-wpdqq_must-gather-vl2l8_598386fe-f553-4dc1-b298-2b9e66befd85/gather/0.log" Nov 28 17:38:21 crc kubenswrapper[4954]: I1128 17:38:21.856198 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:38:21 crc kubenswrapper[4954]: E1128 17:38:21.856929 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:38:21 crc kubenswrapper[4954]: I1128 17:38:21.954992 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-wpdqq/must-gather-vl2l8"] Nov 28 17:38:21 crc kubenswrapper[4954]: I1128 17:38:21.955251 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-wpdqq/must-gather-vl2l8" podUID="598386fe-f553-4dc1-b298-2b9e66befd85" containerName="copy" containerID="cri-o://ef2a85ad33cb23d216bdcf8f61c01a32fbf5b79355f90adb7b29a5afd4730da0" gracePeriod=2 Nov 28 17:38:21 crc kubenswrapper[4954]: I1128 17:38:21.959883 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-wpdqq/must-gather-vl2l8"] Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.281423 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-wpdqq_must-gather-vl2l8_598386fe-f553-4dc1-b298-2b9e66befd85/copy/0.log" Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.281808 4954 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-wpdqq/must-gather-vl2l8" Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.385986 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kn26t\" (UniqueName: \"kubernetes.io/projected/598386fe-f553-4dc1-b298-2b9e66befd85-kube-api-access-kn26t\") pod \"598386fe-f553-4dc1-b298-2b9e66befd85\" (UID: \"598386fe-f553-4dc1-b298-2b9e66befd85\") " Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.386119 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/598386fe-f553-4dc1-b298-2b9e66befd85-must-gather-output\") pod \"598386fe-f553-4dc1-b298-2b9e66befd85\" (UID: \"598386fe-f553-4dc1-b298-2b9e66befd85\") " Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.388335 4954 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-wpdqq_must-gather-vl2l8_598386fe-f553-4dc1-b298-2b9e66befd85/copy/0.log" Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.389189 4954 generic.go:334] "Generic (PLEG): container finished" podID="598386fe-f553-4dc1-b298-2b9e66befd85" containerID="ef2a85ad33cb23d216bdcf8f61c01a32fbf5b79355f90adb7b29a5afd4730da0" exitCode=143 Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.389242 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-wpdqq/must-gather-vl2l8" Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.389248 4954 scope.go:117] "RemoveContainer" containerID="ef2a85ad33cb23d216bdcf8f61c01a32fbf5b79355f90adb7b29a5afd4730da0" Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.392730 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/598386fe-f553-4dc1-b298-2b9e66befd85-kube-api-access-kn26t" (OuterVolumeSpecName: "kube-api-access-kn26t") pod "598386fe-f553-4dc1-b298-2b9e66befd85" (UID: "598386fe-f553-4dc1-b298-2b9e66befd85"). InnerVolumeSpecName "kube-api-access-kn26t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.399342 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kn26t\" (UniqueName: \"kubernetes.io/projected/598386fe-f553-4dc1-b298-2b9e66befd85-kube-api-access-kn26t\") on node \"crc\" DevicePath \"\"" Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.426568 4954 scope.go:117] "RemoveContainer" containerID="0a6854db7b44f6392b9e70e197951101c1a1d1d3225ec33a32bdfc6968cd36bb" Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.483716 4954 scope.go:117] "RemoveContainer" containerID="ef2a85ad33cb23d216bdcf8f61c01a32fbf5b79355f90adb7b29a5afd4730da0" Nov 28 17:38:22 crc kubenswrapper[4954]: E1128 17:38:22.484203 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef2a85ad33cb23d216bdcf8f61c01a32fbf5b79355f90adb7b29a5afd4730da0\": container with ID starting with ef2a85ad33cb23d216bdcf8f61c01a32fbf5b79355f90adb7b29a5afd4730da0 not found: ID does not exist" containerID="ef2a85ad33cb23d216bdcf8f61c01a32fbf5b79355f90adb7b29a5afd4730da0" Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.484250 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef2a85ad33cb23d216bdcf8f61c01a32fbf5b79355f90adb7b29a5afd4730da0"} err="failed to get container status \"ef2a85ad33cb23d216bdcf8f61c01a32fbf5b79355f90adb7b29a5afd4730da0\": rpc error: code = NotFound desc = could not find container \"ef2a85ad33cb23d216bdcf8f61c01a32fbf5b79355f90adb7b29a5afd4730da0\": container with ID starting with ef2a85ad33cb23d216bdcf8f61c01a32fbf5b79355f90adb7b29a5afd4730da0 not found: ID does not exist" Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.484276 4954 scope.go:117] "RemoveContainer" containerID="0a6854db7b44f6392b9e70e197951101c1a1d1d3225ec33a32bdfc6968cd36bb" Nov 28 17:38:22 crc kubenswrapper[4954]: E1128 17:38:22.484711 4954 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0a6854db7b44f6392b9e70e197951101c1a1d1d3225ec33a32bdfc6968cd36bb\": container with ID starting with 0a6854db7b44f6392b9e70e197951101c1a1d1d3225ec33a32bdfc6968cd36bb not found: ID does not exist" containerID="0a6854db7b44f6392b9e70e197951101c1a1d1d3225ec33a32bdfc6968cd36bb" Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.484742 4954 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0a6854db7b44f6392b9e70e197951101c1a1d1d3225ec33a32bdfc6968cd36bb"} err="failed to get container status \"0a6854db7b44f6392b9e70e197951101c1a1d1d3225ec33a32bdfc6968cd36bb\": rpc error: code = NotFound desc = could not find container \"0a6854db7b44f6392b9e70e197951101c1a1d1d3225ec33a32bdfc6968cd36bb\": container with ID starting with 0a6854db7b44f6392b9e70e197951101c1a1d1d3225ec33a32bdfc6968cd36bb not found: ID does not exist" Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.487131 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/598386fe-f553-4dc1-b298-2b9e66befd85-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "598386fe-f553-4dc1-b298-2b9e66befd85" (UID: "598386fe-f553-4dc1-b298-2b9e66befd85"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:38:22 crc kubenswrapper[4954]: I1128 17:38:22.501288 4954 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/598386fe-f553-4dc1-b298-2b9e66befd85-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 28 17:38:23 crc kubenswrapper[4954]: I1128 17:38:23.867330 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="598386fe-f553-4dc1-b298-2b9e66befd85" path="/var/lib/kubelet/pods/598386fe-f553-4dc1-b298-2b9e66befd85/volumes" Nov 28 17:38:32 crc kubenswrapper[4954]: I1128 17:38:32.855776 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:38:32 crc kubenswrapper[4954]: E1128 17:38:32.856727 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:38:46 crc kubenswrapper[4954]: I1128 17:38:46.856487 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:38:46 crc kubenswrapper[4954]: E1128 17:38:46.857775 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:38:58 crc kubenswrapper[4954]: I1128 17:38:58.857026 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:38:58 crc kubenswrapper[4954]: E1128 17:38:58.857978 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:39:09 crc kubenswrapper[4954]: I1128 17:39:09.857506 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:39:09 crc kubenswrapper[4954]: E1128 17:39:09.858542 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:39:21 crc kubenswrapper[4954]: I1128 17:39:21.856121 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:39:21 crc kubenswrapper[4954]: E1128 17:39:21.857226 4954 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:39:33 crc kubenswrapper[4954]: I1128 17:39:33.856267 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:39:33 crc kubenswrapper[4954]: E1128 17:39:33.857230 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:39:48 crc kubenswrapper[4954]: I1128 17:39:48.855898 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:39:48 crc kubenswrapper[4954]: E1128 17:39:48.856639 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:39:59 crc kubenswrapper[4954]: I1128 17:39:59.857185 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:39:59 crc kubenswrapper[4954]: E1128 17:39:59.858446 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:40:10 crc kubenswrapper[4954]: I1128 17:40:10.857181 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:40:10 crc kubenswrapper[4954]: E1128 17:40:10.857905 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:40:25 crc kubenswrapper[4954]: I1128 17:40:25.857168 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416" Nov 28 17:40:25 crc kubenswrapper[4954]: E1128 17:40:25.858089 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.348456 4954 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r672d"] Nov 28 17:40:36 crc kubenswrapper[4954]: E1128 17:40:36.350684 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84115938-bf42-4f0c-a905-38d853fd735b" containerName="extract-utilities" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.350774 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="84115938-bf42-4f0c-a905-38d853fd735b" containerName="extract-utilities" Nov 28 17:40:36 crc kubenswrapper[4954]: E1128 17:40:36.350840 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84115938-bf42-4f0c-a905-38d853fd735b" containerName="registry-server" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.350906 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="84115938-bf42-4f0c-a905-38d853fd735b" containerName="registry-server" Nov 28 17:40:36 crc kubenswrapper[4954]: E1128 17:40:36.350973 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="598386fe-f553-4dc1-b298-2b9e66befd85" containerName="gather" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.351031 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="598386fe-f553-4dc1-b298-2b9e66befd85" containerName="gather" Nov 28 17:40:36 crc kubenswrapper[4954]: E1128 17:40:36.351094 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec" containerName="extract-content" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.351153 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec" containerName="extract-content" Nov 28 17:40:36 crc kubenswrapper[4954]: E1128 17:40:36.351217 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84115938-bf42-4f0c-a905-38d853fd735b" containerName="extract-content" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.351279 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="84115938-bf42-4f0c-a905-38d853fd735b" containerName="extract-content" Nov 28 17:40:36 crc kubenswrapper[4954]: E1128 17:40:36.351355 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="598386fe-f553-4dc1-b298-2b9e66befd85" containerName="copy" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.351411 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="598386fe-f553-4dc1-b298-2b9e66befd85" containerName="copy" Nov 28 17:40:36 crc kubenswrapper[4954]: E1128 17:40:36.351480 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec" containerName="extract-utilities" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.351563 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec" containerName="extract-utilities" Nov 28 17:40:36 crc kubenswrapper[4954]: E1128 17:40:36.351640 4954 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec" containerName="registry-server" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.351705 4954 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec" containerName="registry-server" Nov 28 17:40:36 crc 
kubenswrapper[4954]: I1128 17:40:36.351885 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="598386fe-f553-4dc1-b298-2b9e66befd85" containerName="copy" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.351958 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="598386fe-f553-4dc1-b298-2b9e66befd85" containerName="gather" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.352019 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="84115938-bf42-4f0c-a905-38d853fd735b" containerName="registry-server" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.352089 4954 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b4725ee-80be-4d4e-b7b1-f1e0ac4ab0ec" containerName="registry-server" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.353637 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r672d" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.380734 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r672d"] Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.405872 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8395d95-e55c-43be-a85e-2a2a4201c9ff-catalog-content\") pod \"certified-operators-r672d\" (UID: \"f8395d95-e55c-43be-a85e-2a2a4201c9ff\") " pod="openshift-marketplace/certified-operators-r672d" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.405979 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7r5x\" (UniqueName: \"kubernetes.io/projected/f8395d95-e55c-43be-a85e-2a2a4201c9ff-kube-api-access-s7r5x\") pod \"certified-operators-r672d\" (UID: \"f8395d95-e55c-43be-a85e-2a2a4201c9ff\") " pod="openshift-marketplace/certified-operators-r672d" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.406010 4954 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8395d95-e55c-43be-a85e-2a2a4201c9ff-utilities\") pod \"certified-operators-r672d\" (UID: \"f8395d95-e55c-43be-a85e-2a2a4201c9ff\") " pod="openshift-marketplace/certified-operators-r672d" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.508406 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7r5x\" (UniqueName: \"kubernetes.io/projected/f8395d95-e55c-43be-a85e-2a2a4201c9ff-kube-api-access-s7r5x\") pod \"certified-operators-r672d\" (UID: \"f8395d95-e55c-43be-a85e-2a2a4201c9ff\") " pod="openshift-marketplace/certified-operators-r672d" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.508551 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8395d95-e55c-43be-a85e-2a2a4201c9ff-utilities\") pod \"certified-operators-r672d\" (UID: \"f8395d95-e55c-43be-a85e-2a2a4201c9ff\") " pod="openshift-marketplace/certified-operators-r672d" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.508661 4954 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8395d95-e55c-43be-a85e-2a2a4201c9ff-catalog-content\") pod \"certified-operators-r672d\" (UID: \"f8395d95-e55c-43be-a85e-2a2a4201c9ff\") " 
pod="openshift-marketplace/certified-operators-r672d" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.509167 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8395d95-e55c-43be-a85e-2a2a4201c9ff-utilities\") pod \"certified-operators-r672d\" (UID: \"f8395d95-e55c-43be-a85e-2a2a4201c9ff\") " pod="openshift-marketplace/certified-operators-r672d" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.509260 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8395d95-e55c-43be-a85e-2a2a4201c9ff-catalog-content\") pod \"certified-operators-r672d\" (UID: \"f8395d95-e55c-43be-a85e-2a2a4201c9ff\") " pod="openshift-marketplace/certified-operators-r672d" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.529098 4954 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7r5x\" (UniqueName: \"kubernetes.io/projected/f8395d95-e55c-43be-a85e-2a2a4201c9ff-kube-api-access-s7r5x\") pod \"certified-operators-r672d\" (UID: \"f8395d95-e55c-43be-a85e-2a2a4201c9ff\") " pod="openshift-marketplace/certified-operators-r672d" Nov 28 17:40:36 crc kubenswrapper[4954]: I1128 17:40:36.681764 4954 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r672d" Nov 28 17:40:37 crc kubenswrapper[4954]: I1128 17:40:37.191957 4954 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r672d"] Nov 28 17:40:37 crc kubenswrapper[4954]: I1128 17:40:37.456859 4954 generic.go:334] "Generic (PLEG): container finished" podID="f8395d95-e55c-43be-a85e-2a2a4201c9ff" containerID="5e65fdb57adacc247c9c1b03a49a1740b960013a2517d8b799a16b3670a95e05" exitCode=0 Nov 28 17:40:37 crc kubenswrapper[4954]: I1128 17:40:37.457014 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r672d" event={"ID":"f8395d95-e55c-43be-a85e-2a2a4201c9ff","Type":"ContainerDied","Data":"5e65fdb57adacc247c9c1b03a49a1740b960013a2517d8b799a16b3670a95e05"} Nov 28 17:40:37 crc kubenswrapper[4954]: I1128 17:40:37.457342 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r672d" event={"ID":"f8395d95-e55c-43be-a85e-2a2a4201c9ff","Type":"ContainerStarted","Data":"b3cf491b5a9a51ce2114de723f1d14cd9ee392ca137d1427337dc68ddb7f0ef9"} Nov 28 17:40:37 crc kubenswrapper[4954]: I1128 17:40:37.458505 4954 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:40:39 crc kubenswrapper[4954]: I1128 17:40:39.472945 4954 generic.go:334] "Generic (PLEG): container finished" podID="f8395d95-e55c-43be-a85e-2a2a4201c9ff" containerID="614a0864851423b68f2055fe87b831830c90e4b8443dae4f320ec6a4b145be7a" exitCode=0 Nov 28 17:40:39 crc kubenswrapper[4954]: I1128 17:40:39.473055 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r672d" event={"ID":"f8395d95-e55c-43be-a85e-2a2a4201c9ff","Type":"ContainerDied","Data":"614a0864851423b68f2055fe87b831830c90e4b8443dae4f320ec6a4b145be7a"} Nov 28 17:40:40 crc kubenswrapper[4954]: I1128 17:40:40.481427 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r672d" event={"ID":"f8395d95-e55c-43be-a85e-2a2a4201c9ff","Type":"ContainerStarted","Data":"ebbe127cb78621857be5bafe3b476d03072fc106ad2449b839c5cdafd3bf8eeb"} 
Nov 28 17:40:40 crc kubenswrapper[4954]: I1128 17:40:40.507120 4954 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r672d" podStartSLOduration=1.92985601 podStartE2EDuration="4.5070967s" podCreationTimestamp="2025-11-28 17:40:36 +0000 UTC" firstStartedPulling="2025-11-28 17:40:37.458224205 +0000 UTC m=+5390.849892746" lastFinishedPulling="2025-11-28 17:40:40.035464885 +0000 UTC m=+5393.427133436" observedRunningTime="2025-11-28 17:40:40.506881883 +0000 UTC m=+5393.898550424" watchObservedRunningTime="2025-11-28 17:40:40.5070967 +0000 UTC m=+5393.898765241"
Nov 28 17:40:40 crc kubenswrapper[4954]: I1128 17:40:40.856592 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416"
Nov 28 17:40:40 crc kubenswrapper[4954]: E1128 17:40:40.856804 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079"
Nov 28 17:40:46 crc kubenswrapper[4954]: I1128 17:40:46.682750 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r672d"
Nov 28 17:40:46 crc kubenswrapper[4954]: I1128 17:40:46.683332 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r672d"
Nov 28 17:40:46 crc kubenswrapper[4954]: I1128 17:40:46.735075 4954 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r672d"
Nov 28 17:40:47 crc kubenswrapper[4954]: I1128 17:40:47.582514 4954 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r672d"
Nov 28 17:40:47 crc kubenswrapper[4954]: I1128 17:40:47.634661 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r672d"]
Nov 28 17:40:49 crc kubenswrapper[4954]: I1128 17:40:49.553877 4954 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r672d" podUID="f8395d95-e55c-43be-a85e-2a2a4201c9ff" containerName="registry-server" containerID="cri-o://ebbe127cb78621857be5bafe3b476d03072fc106ad2449b839c5cdafd3bf8eeb" gracePeriod=2
Nov 28 17:40:50 crc kubenswrapper[4954]: I1128 17:40:50.564920 4954 generic.go:334] "Generic (PLEG): container finished" podID="f8395d95-e55c-43be-a85e-2a2a4201c9ff" containerID="ebbe127cb78621857be5bafe3b476d03072fc106ad2449b839c5cdafd3bf8eeb" exitCode=0
Nov 28 17:40:50 crc kubenswrapper[4954]: I1128 17:40:50.565279 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r672d" event={"ID":"f8395d95-e55c-43be-a85e-2a2a4201c9ff","Type":"ContainerDied","Data":"ebbe127cb78621857be5bafe3b476d03072fc106ad2449b839c5cdafd3bf8eeb"}
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.079492 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r672d"
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.190650 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8395d95-e55c-43be-a85e-2a2a4201c9ff-catalog-content\") pod \"f8395d95-e55c-43be-a85e-2a2a4201c9ff\" (UID: \"f8395d95-e55c-43be-a85e-2a2a4201c9ff\") "
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.190748 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7r5x\" (UniqueName: \"kubernetes.io/projected/f8395d95-e55c-43be-a85e-2a2a4201c9ff-kube-api-access-s7r5x\") pod \"f8395d95-e55c-43be-a85e-2a2a4201c9ff\" (UID: \"f8395d95-e55c-43be-a85e-2a2a4201c9ff\") "
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.190792 4954 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8395d95-e55c-43be-a85e-2a2a4201c9ff-utilities\") pod \"f8395d95-e55c-43be-a85e-2a2a4201c9ff\" (UID: \"f8395d95-e55c-43be-a85e-2a2a4201c9ff\") "
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.192163 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8395d95-e55c-43be-a85e-2a2a4201c9ff-utilities" (OuterVolumeSpecName: "utilities") pod "f8395d95-e55c-43be-a85e-2a2a4201c9ff" (UID: "f8395d95-e55c-43be-a85e-2a2a4201c9ff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.195318 4954 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8395d95-e55c-43be-a85e-2a2a4201c9ff-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.197360 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8395d95-e55c-43be-a85e-2a2a4201c9ff-kube-api-access-s7r5x" (OuterVolumeSpecName: "kube-api-access-s7r5x") pod "f8395d95-e55c-43be-a85e-2a2a4201c9ff" (UID: "f8395d95-e55c-43be-a85e-2a2a4201c9ff"). InnerVolumeSpecName "kube-api-access-s7r5x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.250631 4954 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8395d95-e55c-43be-a85e-2a2a4201c9ff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f8395d95-e55c-43be-a85e-2a2a4201c9ff" (UID: "f8395d95-e55c-43be-a85e-2a2a4201c9ff"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.297203 4954 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8395d95-e55c-43be-a85e-2a2a4201c9ff-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.297240 4954 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7r5x\" (UniqueName: \"kubernetes.io/projected/f8395d95-e55c-43be-a85e-2a2a4201c9ff-kube-api-access-s7r5x\") on node \"crc\" DevicePath \"\""
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.575295 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r672d" event={"ID":"f8395d95-e55c-43be-a85e-2a2a4201c9ff","Type":"ContainerDied","Data":"b3cf491b5a9a51ce2114de723f1d14cd9ee392ca137d1427337dc68ddb7f0ef9"}
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.575345 4954 scope.go:117] "RemoveContainer" containerID="ebbe127cb78621857be5bafe3b476d03072fc106ad2449b839c5cdafd3bf8eeb"
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.575459 4954 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r672d"
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.603666 4954 scope.go:117] "RemoveContainer" containerID="614a0864851423b68f2055fe87b831830c90e4b8443dae4f320ec6a4b145be7a"
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.621650 4954 scope.go:117] "RemoveContainer" containerID="5e65fdb57adacc247c9c1b03a49a1740b960013a2517d8b799a16b3670a95e05"
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.639575 4954 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r672d"]
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.647404 4954 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r672d"]
Nov 28 17:40:51 crc kubenswrapper[4954]: I1128 17:40:51.870380 4954 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8395d95-e55c-43be-a85e-2a2a4201c9ff" path="/var/lib/kubelet/pods/f8395d95-e55c-43be-a85e-2a2a4201c9ff/volumes"
Nov 28 17:40:54 crc kubenswrapper[4954]: I1128 17:40:54.855579 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416"
Nov 28 17:40:54 crc kubenswrapper[4954]: E1128 17:40:54.856124 4954 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-jprxj_openshift-machine-config-operator(92ddd1ce-e1c1-4606-8b8d-066aeba50079)\"" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" podUID="92ddd1ce-e1c1-4606-8b8d-066aeba50079"
Nov 28 17:41:05 crc kubenswrapper[4954]: I1128 17:41:05.855995 4954 scope.go:117] "RemoveContainer" containerID="059f5e3e5413046fba4e0deba70e4ec334c9a67ca51f9cc7bf3c36219ded7416"
Nov 28 17:41:06 crc kubenswrapper[4954]: I1128 17:41:06.735661 4954 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-jprxj" event={"ID":"92ddd1ce-e1c1-4606-8b8d-066aeba50079","Type":"ContainerStarted","Data":"14170527bfc343238da24ab035719cb8817dcf2b0a22c141dcea89e1954e7a54"}
[tar archive member headers; binary payload omitted: var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz (gzip data), var/home/core/zuul-output/logs/crc-cloud/, var/home/core/zuul-output/artifacts/, var/home/core/zuul-output/docs/]